2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 16;
102 static unsigned int ipr_fast_reboot;
103 static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
	.cache_line_size = 0x20,
	/* MMIO register offsets for this chip family.
	 * NOTE(review): some member/brace lines appear elided in this excerpt. */
	.set_interrupt_mask_reg = 0x0022C,
	.clr_interrupt_mask_reg = 0x00230,
	.clr_interrupt_mask_reg32 = 0x00230,
	.sense_interrupt_mask_reg = 0x0022C,
	.sense_interrupt_mask_reg32 = 0x0022C,
	.clr_interrupt_reg = 0x00228,
	.clr_interrupt_reg32 = 0x00228,
	.sense_interrupt_reg = 0x00224,
	.sense_interrupt_reg32 = 0x00224,
	.ioarrin_reg = 0x00404,
	.sense_uproc_interrupt_reg = 0x00214,
	.sense_uproc_interrupt_reg32 = 0x00214,
	.set_uproc_interrupt_reg = 0x00214,
	.set_uproc_interrupt_reg32 = 0x00214,
	.clr_uproc_interrupt_reg = 0x00218,
	.clr_uproc_interrupt_reg32 = 0x00218
	{ /* Snipe and Scamp */
	.cache_line_size = 0x20,
	.set_interrupt_mask_reg = 0x00288,
	.clr_interrupt_mask_reg = 0x0028C,
	.clr_interrupt_mask_reg32 = 0x0028C,
	.sense_interrupt_mask_reg = 0x00288,
	.sense_interrupt_mask_reg32 = 0x00288,
	.clr_interrupt_reg = 0x00284,
	.clr_interrupt_reg32 = 0x00284,
	.sense_interrupt_reg = 0x00280,
	.sense_interrupt_reg32 = 0x00280,
	.ioarrin_reg = 0x00504,
	.sense_uproc_interrupt_reg = 0x00290,
	.sense_uproc_interrupt_reg32 = 0x00290,
	.set_uproc_interrupt_reg = 0x00290,
	.set_uproc_interrupt_reg32 = 0x00290,
	.clr_uproc_interrupt_reg = 0x00294,
	.clr_uproc_interrupt_reg32 = 0x00294
	/* Third entry: SIS-64 register layout (includes dump/endian-swap
	 * registers the 32-bit chips lack).
	 * NOTE(review): this entry's opening brace/comment line is not
	 * visible in this excerpt. */
	.cache_line_size = 0x20,
	.set_interrupt_mask_reg = 0x00010,
	.clr_interrupt_mask_reg = 0x00018,
	.clr_interrupt_mask_reg32 = 0x0001C,
	.sense_interrupt_mask_reg = 0x00010,
	.sense_interrupt_mask_reg32 = 0x00014,
	.clr_interrupt_reg = 0x00008,
	.clr_interrupt_reg32 = 0x0000C,
	.sense_interrupt_reg = 0x00000,
	.sense_interrupt_reg32 = 0x00004,
	.ioarrin_reg = 0x00070,
	.sense_uproc_interrupt_reg = 0x00020,
	.sense_uproc_interrupt_reg32 = 0x00024,
	.set_uproc_interrupt_reg = 0x00020,
	.set_uproc_interrupt_reg32 = 0x00024,
	.clr_uproc_interrupt_reg = 0x00028,
	.clr_uproc_interrupt_reg32 = 0x0002C,
	.init_feedback_reg = 0x0005C,
	.dump_addr_reg = 0x00064,
	.dump_data_reg = 0x00068,
	.endian_swap_reg = 0x00084
/*
 * Chip table: maps PCI vendor/device IDs to per-chip attributes and the
 * register layout in ipr_chip_cfg[] above (SIS32 entries use cfg[0]/[1],
 * SIS64 entries use cfg[2]).
 * NOTE(review): field names are not visible in this excerpt; the meaning
 * of the bool and IPR_* columns should be confirmed against
 * struct ipr_chip_t.
 */
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
/* SCSI bus rate limits indexed by the max_speed module parameter (0-2) */
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
205 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
206 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
207 module_param_named(max_speed, ipr_max_speed, uint, 0);
208 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
209 module_param_named(log_level, ipr_log_level, uint, 0);
210 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
211 module_param_named(testmode, ipr_testmode, int, 0);
212 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
213 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
214 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
215 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
216 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
217 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
218 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
219 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
220 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
221 module_param_named(max_devs, ipr_max_devs, int, 0);
222 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
223 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
224 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
225 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
226 module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
227 MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
228 MODULE_LICENSE("GPL");
229 MODULE_VERSION(IPR_DRIVER_VERSION);
/* A constant array of IOASCs/URCs/Error Messages */
/*
 * Each entry pairs an IOASC (IOA status code) with a log level and a
 * message whose leading 4-hex-digit URC (when present) matches IBM
 * service documentation. Entries appear in ascending IOASC order.
 * NOTE(review): the meaning of the second (0/1) field is not visible
 * here — confirm against struct ipr_error_table_t. Several entries in
 * this excerpt are missing their leading {IOASC, ...} line.
 * NOTE(review): consider declaring this table "static const".
 */
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	"Logical Unit in process of becoming ready"},
	"Initializing command required"},
	"34FF: Disk device format in progress"},
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	"Synchronization required"},
	"IOA microcode download required"},
	"Device bus connection is prohibited by host"},
	"No ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	"ATA device status error"},
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, command not allowed to a non-optimized resource"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	"Illegal request, another cable connector was physically disabled"},
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	"Data protect, other volume set problem"},
	"Aborted command, invalid descriptor"},
	"Target operating conditions have changed, dual adapter takeover"},
	"Aborted command, medium removal prevented"},
	"Command terminated by host"},
	"Aborted command, command terminated by host"}
/*
 * SES backplane table: {product id, compare mask, max bus speed in MB/s}.
 * NOTE(review): field semantics inferred from the initializers — 'X'
 * positions in the mask appear to be compared and '*' positions appear
 * to be wildcards; confirm against struct ipr_ses_table_entry and its
 * use sites.
 */
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
/*
 * Function Prototypes — forward declarations for routines defined later
 * in this file.
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Records the command's opcode, type, index, resource handle and caller
 * data in the adapter's circular trace buffer.
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	/* Atomically claim the next slot; the mask wraps the circular buffer. */
	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	/* NOTE(review): an "else" line appears to be elided here in this
	 * excerpt — as written, the sis64 assignment above is always
	 * overwritten; confirm against the full source. */
	trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
/* Tracing disabled: compile ipr_trc_hook() to a no-op.
 * NOTE(review): the #else/#endif lines are not visible in this excerpt. */
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
626 * ipr_lock_and_done - Acquire lock and complete command
627 * @ipr_cmd: ipr command struct
632 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
634 unsigned long lock_flags;
635 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
637 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
638 ipr_cmd->done(ipr_cmd);
639 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Wipes the per-use request (IOARCB) and response (IOASA) state of a
 * recycled command block while preserving its HRRQ assignment and
 * restoring the DMA address of its scatter list area.
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	/* Preserve the hrrq id across the cmd_pkt wipe.
	 * NOTE(review): the declaration of hrrq_id is not visible in this
	 * excerpt. */
	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	/* Point the IOARCB back at this command's own IOADL area; the
	 * 64-bit and 32-bit SIS interfaces use different fields. */
	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	/* NOTE(review): the "} else {" line appears to be elided here. */
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
685 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
686 * @ipr_cmd: ipr command struct
691 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
692 void (*fast_done) (struct ipr_cmnd *))
694 ipr_reinit_ipr_cmnd(ipr_cmd);
695 ipr_cmd->u.scratch = 0;
696 ipr_cmd->sibling = NULL;
697 ipr_cmd->eh_comp = NULL;
698 ipr_cmd->fast_done = fast_done;
699 timer_setup(&ipr_cmd->timer, NULL, 0);
703 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
704 * @ioa_cfg: ioa config struct
707 * pointer to ipr command struct
710 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
712 struct ipr_cmnd *ipr_cmd = NULL;
714 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
715 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
716 struct ipr_cmnd, queue);
717 list_del(&ipr_cmd->queue);
725 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
726 * @ioa_cfg: ioa config struct
729 * pointer to ipr command struct
732 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
734 struct ipr_cmnd *ipr_cmd =
735 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
736 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
	/* NOTE(review): the @clr_ints parameter line and loop-index
	 * declaration are not visible in this excerpt. */
	volatile u32 int_reg;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);

	/* Set interrupt mask to stop all new interrupts */
	/* NOTE(review): a sis64 conditional selecting between the 64-bit
	 * and 32-bit mask writes appears to be elided here. */
	writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	/* Read back to flush the posted writes before returning. */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	/* No PCI-X capability: nothing to save.
	 * NOTE(review): the return statements of this function are not
	 * visible in this excerpt. */
	if (pcix_cmd_reg == 0)

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");

	/* Always restore with data parity error recovery and relaxed
	 * ordering enabled. */
	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Restores the PCI-X command register value saved by
 * ipr_save_pcix_cmd_reg().
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	/* NOTE(review): the surrounding conditional and return statements
	 * are not visible in this excerpt. */
	if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted. Marks the qc as failed, wakes any
 * error-handler waiter, and returns the command block to the free list.
 *
 * Return value:
 *	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	/* Report the aborted op back to libata as a generic error. */
	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	/* NOTE(review): a statement (likely the qc completion call)
	 * appears to be elided here in this excerpt. */
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
847 * ipr_sata_eh_done - done function for aborted SATA commands
848 * @ipr_cmd: ipr command struct
850 * This function is invoked for ops generated to SATA
851 * devices which are being aborted.
856 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
858 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
859 unsigned long hrrq_flags;
861 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
862 __ipr_sata_eh_done(ipr_cmd);
863 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
867 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
868 * @ipr_cmd: ipr command struct
870 * This function is invoked by the interrupt handler for
871 * ops generated by the SCSI mid-layer which are being aborted.
876 static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
878 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
880 scsi_cmd->result |= (DID_ERROR << 16);
882 scsi_dma_unmap(ipr_cmd->scsi_cmd);
883 scsi_cmd->scsi_done(scsi_cmd);
884 if (ipr_cmd->eh_comp)
885 complete(ipr_cmd->eh_comp);
886 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
890 * ipr_scsi_eh_done - mid-layer done function for aborted ops
891 * @ipr_cmd: ipr command struct
893 * This function is invoked by the interrupt handler for
894 * ops generated by the SCSI mid-layer which are being aborted.
899 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
901 unsigned long hrrq_flags;
902 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
904 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
905 __ipr_scsi_eh_done(ipr_cmd);
906 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops: every pending command on
 * every HRRQ is completed with an IOA-was-reset status, routed through
 * the appropriate (SCSI or SATA) error-handler done function.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			/* Stamp a driver-generated "IOA was reset" status
			 * into the response area. */
			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			/* Route completion through the matching EH done fn. */
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		/* NOTE(review): the loop's closing brace appears to be
		 * elided here in this excerpt. */
		spin_unlock(&hrrq->_lock);
951 * ipr_send_command - Send driver initiated requests.
952 * @ipr_cmd: ipr command struct
954 * This function sends a command to the adapter using the correct write call.
955 * In the case of sis64, calculate the ioarcb size required. Then or in the
961 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
963 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
964 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
966 if (ioa_cfg->sis64) {
967 /* The default size is 256 bytes */
968 send_dma_addr |= 0x1;
970 /* If the number of ioadls * size of ioadl > 128 bytes,
971 then use a 512 byte ioarcb */
972 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
973 send_dma_addr |= 0x4;
974 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
976 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
980 * ipr_do_req - Send driver initiated requests.
981 * @ipr_cmd: ipr command struct
982 * @done: done function
983 * @timeout_func: timeout function
984 * @timeout: timeout value
986 * This function sends the specified command to the adapter with the
987 * timeout given. The done function is invoked on command completion.
992 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
993 void (*done) (struct ipr_cmnd *),
994 void (*timeout_func) (struct timer_list *), u32 timeout)
996 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
998 ipr_cmd->done = done;
1000 ipr_cmd->timer.expires = jiffies + timeout;
1001 ipr_cmd->timer.function = timeout_func;
1003 add_timer(&ipr_cmd->timer);
1005 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
1007 ipr_send_command(ipr_cmd);
1011 * ipr_internal_cmd_done - Op done function for an internally generated op.
1012 * @ipr_cmd: ipr command struct
1014 * This function is the op done function for an internally generated,
1015 * blocking op. It simply wakes the sleeping thread.
1020 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
1022 if (ipr_cmd->sibling)
1023 ipr_cmd->sibling = NULL;
1025 complete(&ipr_cmd->completion);
1029 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1030 * @ipr_cmd: ipr command struct
1031 * @dma_addr: dma address
1032 * @len: transfer length
1033 * @flags: ioadl flag value
1035 * This function initializes an ioadl in the case where there is only a single
1041 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
1044 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1045 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
1047 ipr_cmd->dma_use_sg = 1;
1049 if (ipr_cmd->ioa_cfg->sis64) {
1050 ioadl64->flags = cpu_to_be32(flags);
1051 ioadl64->data_len = cpu_to_be32(len);
1052 ioadl64->address = cpu_to_be64(dma_addr);
1054 ipr_cmd->ioarcb.ioadl_len =
1055 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1056 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1058 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1059 ioadl->address = cpu_to_be32(dma_addr);
1061 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1062 ipr_cmd->ioarcb.read_ioadl_len =
1063 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1064 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1066 ipr_cmd->ioarcb.ioadl_len =
1067 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1068 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1074 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1075 * @ipr_cmd: ipr command struct
1076 * @timeout_func: function to invoke if command times out
1082 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1083 void (*timeout_func) (struct timer_list *),
1086 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1088 init_completion(&ipr_cmd->completion);
1089 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1091 spin_unlock_irq(ioa_cfg->host->host_lock);
1092 wait_for_completion(&ipr_cmd->completion);
1093 spin_lock_irq(ioa_cfg->host->host_lock);
1096 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1100 if (ioa_cfg->hrrq_num == 1)
1103 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1104 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1110 * ipr_send_hcam - Send an HCAM to the adapter.
1111 * @ioa_cfg: ioa config struct
1113 * @hostrcb: hostrcb struct
1115 * This function will send a Host Controlled Async command to the adapter.
1116 * If HCAMs are currently not allowed to be issued to the adapter, it will
1117 * place the hostrcb on the free queue.
1122 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1123 struct ipr_hostrcb *hostrcb)
1125 struct ipr_cmnd *ipr_cmd;
1126 struct ipr_ioarcb *ioarcb;
1128 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1129 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1130 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1131 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1133 ipr_cmd->u.hostrcb = hostrcb;
1134 ioarcb = &ipr_cmd->ioarcb;
1136 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1137 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1138 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1139 ioarcb->cmd_pkt.cdb[1] = type;
1140 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1141 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1143 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1144 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1146 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1147 ipr_cmd->done = ipr_process_ccn;
1149 ipr_cmd->done = ipr_process_error;
1151 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1153 ipr_send_command(ipr_cmd);
1155 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1160 * ipr_update_ata_class - Update the ata class in the resource entry
1161 * @res: resource entry struct
1162 * @proto: cfgte device bus protocol value
1167 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1170 case IPR_PROTO_SATA:
1171 case IPR_PROTO_SAS_STP:
1172 res->ata_class = ATA_DEV_ATA;
1174 case IPR_PROTO_SATA_ATAPI:
1175 case IPR_PROTO_SAS_STP_ATAPI:
1176 res->ata_class = ATA_DEV_ATAPI;
1179 res->ata_class = ATA_DEV_UNKNOWN;
1185 * ipr_init_res_entry - Initialize a resource entry struct.
1186 * @res: resource entry struct
1187 * @cfgtew: config table entry wrapper struct
1192 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1193 struct ipr_config_table_entry_wrapper *cfgtew)
1197 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1198 struct ipr_resource_entry *gscsi_res = NULL;
1200 res->needs_sync_complete = 0;
1203 res->del_from_ml = 0;
1204 res->resetting_device = 0;
1205 res->reset_occurred = 0;
1207 res->sata_port = NULL;
1209 if (ioa_cfg->sis64) {
1210 proto = cfgtew->u.cfgte64->proto;
1211 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1212 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1213 res->qmodel = IPR_QUEUEING_MODEL64(res);
1214 res->type = cfgtew->u.cfgte64->res_type;
1216 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1217 sizeof(res->res_path));
1220 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1221 sizeof(res->dev_lun.scsi_lun));
1222 res->lun = scsilun_to_int(&res->dev_lun);
1224 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1225 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1226 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1228 res->target = gscsi_res->target;
1233 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1234 ioa_cfg->max_devs_supported);
1235 set_bit(res->target, ioa_cfg->target_ids);
1237 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1238 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1240 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1241 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1242 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1243 ioa_cfg->max_devs_supported);
1244 set_bit(res->target, ioa_cfg->array_ids);
1245 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1246 res->bus = IPR_VSET_VIRTUAL_BUS;
1247 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1248 ioa_cfg->max_devs_supported);
1249 set_bit(res->target, ioa_cfg->vset_ids);
1251 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1252 ioa_cfg->max_devs_supported);
1253 set_bit(res->target, ioa_cfg->target_ids);
1256 proto = cfgtew->u.cfgte->proto;
1257 res->qmodel = IPR_QUEUEING_MODEL(res);
1258 res->flags = cfgtew->u.cfgte->flags;
1259 if (res->flags & IPR_IS_IOA_RESOURCE)
1260 res->type = IPR_RES_TYPE_IOAFP;
1262 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1264 res->bus = cfgtew->u.cfgte->res_addr.bus;
1265 res->target = cfgtew->u.cfgte->res_addr.target;
1266 res->lun = cfgtew->u.cfgte->res_addr.lun;
1267 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1270 ipr_update_ata_class(res, proto);
1274 * ipr_is_same_device - Determine if two devices are the same.
1275 * @res: resource entry struct
1276 * @cfgtew: config table entry wrapper struct
1279 * 1 if the devices are the same / 0 otherwise
1281 static int ipr_is_same_device(struct ipr_resource_entry *res,
1282 struct ipr_config_table_entry_wrapper *cfgtew)
1284 if (res->ioa_cfg->sis64) {
1285 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1286 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1287 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1288 sizeof(cfgtew->u.cfgte64->lun))) {
1292 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1293 res->target == cfgtew->u.cfgte->res_addr.target &&
1294 res->lun == cfgtew->u.cfgte->res_addr.lun)
1302 * __ipr_format_res_path - Format the resource path for printing.
1303 * @res_path: resource path
1305 * @len: length of buffer provided
1310 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1316 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1317 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1318 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1324 * ipr_format_res_path - Format the resource path for printing.
1325 * @ioa_cfg: ioa config struct
1326 * @res_path: resource path
1328 * @len: length of buffer provided
1333 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1334 u8 *res_path, char *buffer, int len)
1339 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1340 __ipr_format_res_path(res_path, p, len - (buffer - p));
1345 * ipr_update_res_entry - Update the resource entry.
1346 * @res: resource entry struct
1347 * @cfgtew: config table entry wrapper struct
1352 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1353 struct ipr_config_table_entry_wrapper *cfgtew)
1355 char buffer[IPR_MAX_RES_PATH_LENGTH];
1359 if (res->ioa_cfg->sis64) {
1360 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1361 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1362 res->type = cfgtew->u.cfgte64->res_type;
1364 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1365 sizeof(struct ipr_std_inq_data));
1367 res->qmodel = IPR_QUEUEING_MODEL64(res);
1368 proto = cfgtew->u.cfgte64->proto;
1369 res->res_handle = cfgtew->u.cfgte64->res_handle;
1370 res->dev_id = cfgtew->u.cfgte64->dev_id;
1372 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1373 sizeof(res->dev_lun.scsi_lun));
1375 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1376 sizeof(res->res_path))) {
1377 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1378 sizeof(res->res_path));
1382 if (res->sdev && new_path)
1383 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1384 ipr_format_res_path(res->ioa_cfg,
1385 res->res_path, buffer, sizeof(buffer)));
1387 res->flags = cfgtew->u.cfgte->flags;
1388 if (res->flags & IPR_IS_IOA_RESOURCE)
1389 res->type = IPR_RES_TYPE_IOAFP;
1391 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1393 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1394 sizeof(struct ipr_std_inq_data));
1396 res->qmodel = IPR_QUEUEING_MODEL(res);
1397 proto = cfgtew->u.cfgte->proto;
1398 res->res_handle = cfgtew->u.cfgte->res_handle;
1401 ipr_update_ata_class(res, proto);
1405 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1407 * @res: resource entry struct
1408 * @cfgtew: config table entry wrapper struct
1413 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1415 struct ipr_resource_entry *gscsi_res = NULL;
1416 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1418 if (!ioa_cfg->sis64)
1421 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1422 clear_bit(res->target, ioa_cfg->array_ids);
1423 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1424 clear_bit(res->target, ioa_cfg->vset_ids);
1425 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1426 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1427 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1429 clear_bit(res->target, ioa_cfg->target_ids);
1431 } else if (res->bus == 0)
1432 clear_bit(res->target, ioa_cfg->target_ids);
1436 * ipr_handle_config_change - Handle a config change from the adapter
1437 * @ioa_cfg: ioa config struct
1443 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1444 struct ipr_hostrcb *hostrcb)
1446 struct ipr_resource_entry *res = NULL;
1447 struct ipr_config_table_entry_wrapper cfgtew;
1448 __be32 cc_res_handle;
1452 if (ioa_cfg->sis64) {
1453 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1454 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1456 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1457 cc_res_handle = cfgtew.u.cfgte->res_handle;
1460 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1461 if (res->res_handle == cc_res_handle) {
1468 if (list_empty(&ioa_cfg->free_res_q)) {
1469 ipr_send_hcam(ioa_cfg,
1470 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1475 res = list_entry(ioa_cfg->free_res_q.next,
1476 struct ipr_resource_entry, queue);
1478 list_del(&res->queue);
1479 ipr_init_res_entry(res, &cfgtew);
1480 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1483 ipr_update_res_entry(res, &cfgtew);
1485 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1487 res->del_from_ml = 1;
1488 res->res_handle = IPR_INVALID_RES_HANDLE;
1489 schedule_work(&ioa_cfg->work_q);
1491 ipr_clear_res_target(res);
1492 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1494 } else if (!res->sdev || res->del_from_ml) {
1496 schedule_work(&ioa_cfg->work_q);
1499 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1503 * ipr_process_ccn - Op done function for a CCN.
1504 * @ipr_cmd: ipr command struct
1506 * This function is the op done function for a configuration
1507 * change notification host controlled async from the adapter.
1512 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1514 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1515 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1516 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1518 list_del_init(&hostrcb->queue);
1519 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1522 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1523 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1524 dev_err(&ioa_cfg->pdev->dev,
1525 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1527 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1529 ipr_handle_config_change(ioa_cfg, hostrcb);
/**
 * strip_whitespace - Strip and pad trailing whitespace.
 * @i:		size of buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace and
 * NUL terminate the string. Note buf[0] is never stripped,
 * so an all-blank input keeps one leading space.
 *
 * Return value:
 * 	none
 **/
static void strip_whitespace(int i, char *buf)
{
	if (i < 1)
		return;
	i--;
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = '\0';
}
1553 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1554 * @prefix: string to print at start of printk
1555 * @hostrcb: hostrcb pointer
1556 * @vpd: vendor/product id/sn struct
1561 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1562 struct ipr_vpd *vpd)
1564 char vendor_id[IPR_VENDOR_ID_LEN + 1];
1565 char product_id[IPR_PROD_ID_LEN + 1];
1566 char sn[IPR_SERIAL_NUM_LEN + 1];
1568 memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1569 strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
1571 memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
1572 strip_whitespace(IPR_PROD_ID_LEN, product_id);
1574 memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
1575 strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
1577 ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
1578 vendor_id, product_id, sn);
1582 * ipr_log_vpd - Log the passed VPD to the error log.
1583 * @vpd: vendor/product id/sn struct
1588 static void ipr_log_vpd(struct ipr_vpd *vpd)
1590 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1591 + IPR_SERIAL_NUM_LEN];
1593 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1594 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1596 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1597 ipr_err("Vendor/Product ID: %s\n", buffer);
1599 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1600 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1601 ipr_err(" Serial Number: %s\n", buffer);
1605 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1606 * @prefix: string to print at start of printk
1607 * @hostrcb: hostrcb pointer
1608 * @vpd: vendor/product id/sn/wwn struct
1613 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1614 struct ipr_ext_vpd *vpd)
1616 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1617 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1618 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1622 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1623 * @vpd: vendor/product id/sn/wwn struct
1628 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1630 ipr_log_vpd(&vpd->vpd);
1631 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1632 be32_to_cpu(vpd->wwid[1]));
1636 * ipr_log_enhanced_cache_error - Log a cache error.
1637 * @ioa_cfg: ioa config struct
1638 * @hostrcb: hostrcb struct
1643 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1644 struct ipr_hostrcb *hostrcb)
1646 struct ipr_hostrcb_type_12_error *error;
1649 error = &hostrcb->hcam.u.error64.u.type_12_error;
1651 error = &hostrcb->hcam.u.error.u.type_12_error;
1653 ipr_err("-----Current Configuration-----\n");
1654 ipr_err("Cache Directory Card Information:\n");
1655 ipr_log_ext_vpd(&error->ioa_vpd);
1656 ipr_err("Adapter Card Information:\n");
1657 ipr_log_ext_vpd(&error->cfc_vpd);
1659 ipr_err("-----Expected Configuration-----\n");
1660 ipr_err("Cache Directory Card Information:\n");
1661 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1662 ipr_err("Adapter Card Information:\n");
1663 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1665 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1666 be32_to_cpu(error->ioa_data[0]),
1667 be32_to_cpu(error->ioa_data[1]),
1668 be32_to_cpu(error->ioa_data[2]));
1672 * ipr_log_cache_error - Log a cache error.
1673 * @ioa_cfg: ioa config struct
1674 * @hostrcb: hostrcb struct
1679 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1680 struct ipr_hostrcb *hostrcb)
1682 struct ipr_hostrcb_type_02_error *error =
1683 &hostrcb->hcam.u.error.u.type_02_error;
1685 ipr_err("-----Current Configuration-----\n");
1686 ipr_err("Cache Directory Card Information:\n");
1687 ipr_log_vpd(&error->ioa_vpd);
1688 ipr_err("Adapter Card Information:\n");
1689 ipr_log_vpd(&error->cfc_vpd);
1691 ipr_err("-----Expected Configuration-----\n");
1692 ipr_err("Cache Directory Card Information:\n");
1693 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1694 ipr_err("Adapter Card Information:\n");
1695 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1697 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1698 be32_to_cpu(error->ioa_data[0]),
1699 be32_to_cpu(error->ioa_data[1]),
1700 be32_to_cpu(error->ioa_data[2]));
1704 * ipr_log_enhanced_config_error - Log a configuration error.
1705 * @ioa_cfg: ioa config struct
1706 * @hostrcb: hostrcb struct
1711 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1712 struct ipr_hostrcb *hostrcb)
1714 int errors_logged, i;
1715 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1716 struct ipr_hostrcb_type_13_error *error;
1718 error = &hostrcb->hcam.u.error.u.type_13_error;
1719 errors_logged = be32_to_cpu(error->errors_logged);
1721 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1722 be32_to_cpu(error->errors_detected), errors_logged);
1724 dev_entry = error->dev;
1726 for (i = 0; i < errors_logged; i++, dev_entry++) {
1729 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1730 ipr_log_ext_vpd(&dev_entry->vpd);
1732 ipr_err("-----New Device Information-----\n");
1733 ipr_log_ext_vpd(&dev_entry->new_vpd);
1735 ipr_err("Cache Directory Card Information:\n");
1736 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1738 ipr_err("Adapter Card Information:\n");
1739 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1744 * ipr_log_sis64_config_error - Log a device error.
1745 * @ioa_cfg: ioa config struct
1746 * @hostrcb: hostrcb struct
1751 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1752 struct ipr_hostrcb *hostrcb)
1754 int errors_logged, i;
1755 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1756 struct ipr_hostrcb_type_23_error *error;
1757 char buffer[IPR_MAX_RES_PATH_LENGTH];
1759 error = &hostrcb->hcam.u.error64.u.type_23_error;
1760 errors_logged = be32_to_cpu(error->errors_logged);
1762 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1763 be32_to_cpu(error->errors_detected), errors_logged);
1765 dev_entry = error->dev;
1767 for (i = 0; i < errors_logged; i++, dev_entry++) {
1770 ipr_err("Device %d : %s", i + 1,
1771 __ipr_format_res_path(dev_entry->res_path,
1772 buffer, sizeof(buffer)));
1773 ipr_log_ext_vpd(&dev_entry->vpd);
1775 ipr_err("-----New Device Information-----\n");
1776 ipr_log_ext_vpd(&dev_entry->new_vpd);
1778 ipr_err("Cache Directory Card Information:\n");
1779 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1781 ipr_err("Adapter Card Information:\n");
1782 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1787 * ipr_log_config_error - Log a configuration error.
1788 * @ioa_cfg: ioa config struct
1789 * @hostrcb: hostrcb struct
1794 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1795 struct ipr_hostrcb *hostrcb)
1797 int errors_logged, i;
1798 struct ipr_hostrcb_device_data_entry *dev_entry;
1799 struct ipr_hostrcb_type_03_error *error;
1801 error = &hostrcb->hcam.u.error.u.type_03_error;
1802 errors_logged = be32_to_cpu(error->errors_logged);
1804 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1805 be32_to_cpu(error->errors_detected), errors_logged);
1807 dev_entry = error->dev;
1809 for (i = 0; i < errors_logged; i++, dev_entry++) {
1812 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1813 ipr_log_vpd(&dev_entry->vpd);
1815 ipr_err("-----New Device Information-----\n");
1816 ipr_log_vpd(&dev_entry->new_vpd);
1818 ipr_err("Cache Directory Card Information:\n");
1819 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1821 ipr_err("Adapter Card Information:\n");
1822 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1824 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1825 be32_to_cpu(dev_entry->ioa_data[0]),
1826 be32_to_cpu(dev_entry->ioa_data[1]),
1827 be32_to_cpu(dev_entry->ioa_data[2]),
1828 be32_to_cpu(dev_entry->ioa_data[3]),
1829 be32_to_cpu(dev_entry->ioa_data[4]));
1834 * ipr_log_enhanced_array_error - Log an array configuration error.
1835 * @ioa_cfg: ioa config struct
1836 * @hostrcb: hostrcb struct
1841 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1842 struct ipr_hostrcb *hostrcb)
1845 struct ipr_hostrcb_type_14_error *error;
1846 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1847 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1849 error = &hostrcb->hcam.u.error.u.type_14_error;
1853 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1854 error->protection_level,
1855 ioa_cfg->host->host_no,
1856 error->last_func_vset_res_addr.bus,
1857 error->last_func_vset_res_addr.target,
1858 error->last_func_vset_res_addr.lun);
1862 array_entry = error->array_member;
1863 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1864 ARRAY_SIZE(error->array_member));
1866 for (i = 0; i < num_entries; i++, array_entry++) {
1867 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1870 if (be32_to_cpu(error->exposed_mode_adn) == i)
1871 ipr_err("Exposed Array Member %d:\n", i);
1873 ipr_err("Array Member %d:\n", i);
1875 ipr_log_ext_vpd(&array_entry->vpd);
1876 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1877 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1878 "Expected Location");
1885 * ipr_log_array_error - Log an array configuration error.
1886 * @ioa_cfg: ioa config struct
1887 * @hostrcb: hostrcb struct
1892 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1893 struct ipr_hostrcb *hostrcb)
1896 struct ipr_hostrcb_type_04_error *error;
1897 struct ipr_hostrcb_array_data_entry *array_entry;
1898 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1900 error = &hostrcb->hcam.u.error.u.type_04_error;
1904 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1905 error->protection_level,
1906 ioa_cfg->host->host_no,
1907 error->last_func_vset_res_addr.bus,
1908 error->last_func_vset_res_addr.target,
1909 error->last_func_vset_res_addr.lun);
1913 array_entry = error->array_member;
1915 for (i = 0; i < 18; i++) {
1916 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1919 if (be32_to_cpu(error->exposed_mode_adn) == i)
1920 ipr_err("Exposed Array Member %d:\n", i);
1922 ipr_err("Array Member %d:\n", i);
1924 ipr_log_vpd(&array_entry->vpd);
1926 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1927 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1928 "Expected Location");
1933 array_entry = error->array_member2;
1940 * ipr_log_hex_data - Log additional hex IOA error data.
1941 * @ioa_cfg: ioa config struct
1942 * @data: IOA error data
1948 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1955 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1956 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1958 for (i = 0; i < len / 4; i += 4) {
1959 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1960 be32_to_cpu(data[i]),
1961 be32_to_cpu(data[i+1]),
1962 be32_to_cpu(data[i+2]),
1963 be32_to_cpu(data[i+3]));
1968 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1969 * @ioa_cfg: ioa config struct
1970 * @hostrcb: hostrcb struct
1975 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1976 struct ipr_hostrcb *hostrcb)
1978 struct ipr_hostrcb_type_17_error *error;
1981 error = &hostrcb->hcam.u.error64.u.type_17_error;
1983 error = &hostrcb->hcam.u.error.u.type_17_error;
1985 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1986 strim(error->failure_reason);
1988 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1989 be32_to_cpu(hostrcb->hcam.u.error.prc));
1990 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1991 ipr_log_hex_data(ioa_cfg, error->data,
1992 be32_to_cpu(hostrcb->hcam.length) -
1993 (offsetof(struct ipr_hostrcb_error, u) +
1994 offsetof(struct ipr_hostrcb_type_17_error, data)));
1998 * ipr_log_dual_ioa_error - Log a dual adapter error.
1999 * @ioa_cfg: ioa config struct
2000 * @hostrcb: hostrcb struct
2005 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2006 struct ipr_hostrcb *hostrcb)
2008 struct ipr_hostrcb_type_07_error *error;
2010 error = &hostrcb->hcam.u.error.u.type_07_error;
2011 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2012 strim(error->failure_reason);
2014 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2015 be32_to_cpu(hostrcb->hcam.u.error.prc));
2016 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2017 ipr_log_hex_data(ioa_cfg, error->data,
2018 be32_to_cpu(hostrcb->hcam.length) -
2019 (offsetof(struct ipr_hostrcb_error, u) +
2020 offsetof(struct ipr_hostrcb_type_07_error, data)));
2023 static const struct {
2026 } path_active_desc[] = {
2027 { IPR_PATH_NO_INFO, "Path" },
2028 { IPR_PATH_ACTIVE, "Active path" },
2029 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2032 static const struct {
2035 } path_state_desc[] = {
2036 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2037 { IPR_PATH_HEALTHY, "is healthy" },
2038 { IPR_PATH_DEGRADED, "is degraded" },
2039 { IPR_PATH_FAILED, "is failed" }
2043 * ipr_log_fabric_path - Log a fabric path error
2044 * @hostrcb: hostrcb struct
2045 * @fabric: fabric descriptor
2050 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2051 struct ipr_hostrcb_fabric_desc *fabric)
2054 u8 path_state = fabric->path_state;
2055 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2056 u8 state = path_state & IPR_PATH_STATE_MASK;
2058 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2059 if (path_active_desc[i].active != active)
2062 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2063 if (path_state_desc[j].state != state)
2066 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2067 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2068 path_active_desc[i].desc, path_state_desc[j].desc,
2070 } else if (fabric->cascaded_expander == 0xff) {
2071 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2072 path_active_desc[i].desc, path_state_desc[j].desc,
2073 fabric->ioa_port, fabric->phy);
2074 } else if (fabric->phy == 0xff) {
2075 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2076 path_active_desc[i].desc, path_state_desc[j].desc,
2077 fabric->ioa_port, fabric->cascaded_expander);
2079 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2080 path_active_desc[i].desc, path_state_desc[j].desc,
2081 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2087 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2088 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2092 * ipr_log64_fabric_path - Log a fabric path error
2093 * @hostrcb: hostrcb struct
2094 * @fabric: fabric descriptor
2099 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2100 struct ipr_hostrcb64_fabric_desc *fabric)
2103 u8 path_state = fabric->path_state;
2104 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2105 u8 state = path_state & IPR_PATH_STATE_MASK;
2106 char buffer[IPR_MAX_RES_PATH_LENGTH];
2108 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2109 if (path_active_desc[i].active != active)
2112 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2113 if (path_state_desc[j].state != state)
2116 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2117 path_active_desc[i].desc, path_state_desc[j].desc,
2118 ipr_format_res_path(hostrcb->ioa_cfg,
2120 buffer, sizeof(buffer)));
2125 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2126 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2127 buffer, sizeof(buffer)));
2130 static const struct {
2133 } path_type_desc[] = {
2134 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2135 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2136 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2137 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2140 static const struct {
2143 } path_status_desc[] = {
2144 { IPR_PATH_CFG_NO_PROB, "Functional" },
2145 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2146 { IPR_PATH_CFG_FAILED, "Failed" },
2147 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2148 { IPR_PATH_NOT_DETECTED, "Missing" },
2149 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
/*
 * Printable SAS link rates, indexed by the 4-bit value of
 * (link_rate & IPR_PHY_LINK_RATE_MASK).  Values with no defined
 * meaning report "unknown".
 */
static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"resetting",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
2172 * ipr_log_path_elem - Log a fabric path element.
2173 * @hostrcb: hostrcb struct
2174 * @cfg: fabric path element struct
2179 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2180 struct ipr_hostrcb_config_element *cfg)
2183 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2184 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2186 if (type == IPR_PATH_CFG_NOT_EXIST)
2189 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2190 if (path_type_desc[i].type != type)
2193 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2194 if (path_status_desc[j].status != status)
2197 if (type == IPR_PATH_CFG_IOA_PORT) {
2198 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2199 path_status_desc[j].desc, path_type_desc[i].desc,
2200 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2201 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2203 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2204 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2205 path_status_desc[j].desc, path_type_desc[i].desc,
2206 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2207 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2208 } else if (cfg->cascaded_expander == 0xff) {
2209 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2210 "WWN=%08X%08X\n", path_status_desc[j].desc,
2211 path_type_desc[i].desc, cfg->phy,
2212 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2213 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2214 } else if (cfg->phy == 0xff) {
2215 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2216 "WWN=%08X%08X\n", path_status_desc[j].desc,
2217 path_type_desc[i].desc, cfg->cascaded_expander,
2218 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2221 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2222 "WWN=%08X%08X\n", path_status_desc[j].desc,
2223 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2224 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2225 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2232 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2233 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2234 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2235 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2239 * ipr_log64_path_elem - Log a fabric path element.
2240 * @hostrcb: hostrcb struct
2241 * @cfg: fabric path element struct
2246 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2247 struct ipr_hostrcb64_config_element *cfg)
2250 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2251 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2252 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2253 char buffer[IPR_MAX_RES_PATH_LENGTH];
2255 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2258 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2259 if (path_type_desc[i].type != type)
2262 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2263 if (path_status_desc[j].status != status)
2266 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2267 path_status_desc[j].desc, path_type_desc[i].desc,
2268 ipr_format_res_path(hostrcb->ioa_cfg,
2269 cfg->res_path, buffer, sizeof(buffer)),
2270 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2271 be32_to_cpu(cfg->wwid[0]),
2272 be32_to_cpu(cfg->wwid[1]));
2276 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2277 "WWN=%08X%08X\n", cfg->type_status,
2278 ipr_format_res_path(hostrcb->ioa_cfg,
2279 cfg->res_path, buffer, sizeof(buffer)),
2280 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2281 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2285 * ipr_log_fabric_error - Log a fabric error.
2286 * @ioa_cfg: ioa config struct
2287 * @hostrcb: hostrcb struct
2292 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2293 struct ipr_hostrcb *hostrcb)
2295 struct ipr_hostrcb_type_20_error *error;
2296 struct ipr_hostrcb_fabric_desc *fabric;
2297 struct ipr_hostrcb_config_element *cfg;
2300 error = &hostrcb->hcam.u.error.u.type_20_error;
2301 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2302 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2304 add_len = be32_to_cpu(hostrcb->hcam.length) -
2305 (offsetof(struct ipr_hostrcb_error, u) +
2306 offsetof(struct ipr_hostrcb_type_20_error, desc));
2308 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2309 ipr_log_fabric_path(hostrcb, fabric);
2310 for_each_fabric_cfg(fabric, cfg)
2311 ipr_log_path_elem(hostrcb, cfg);
2313 add_len -= be16_to_cpu(fabric->length);
2314 fabric = (struct ipr_hostrcb_fabric_desc *)
2315 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2318 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2322 * ipr_log_sis64_array_error - Log a sis64 array error.
2323 * @ioa_cfg: ioa config struct
2324 * @hostrcb: hostrcb struct
2329 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2330 struct ipr_hostrcb *hostrcb)
2333 struct ipr_hostrcb_type_24_error *error;
2334 struct ipr_hostrcb64_array_data_entry *array_entry;
2335 char buffer[IPR_MAX_RES_PATH_LENGTH];
2336 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2338 error = &hostrcb->hcam.u.error64.u.type_24_error;
2342 ipr_err("RAID %s Array Configuration: %s\n",
2343 error->protection_level,
2344 ipr_format_res_path(ioa_cfg, error->last_res_path,
2345 buffer, sizeof(buffer)));
2349 array_entry = error->array_member;
2350 num_entries = min_t(u32, error->num_entries,
2351 ARRAY_SIZE(error->array_member));
2353 for (i = 0; i < num_entries; i++, array_entry++) {
2355 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2358 if (error->exposed_mode_adn == i)
2359 ipr_err("Exposed Array Member %d:\n", i);
2361 ipr_err("Array Member %d:\n", i);
2363 ipr_err("Array Member %d:\n", i);
2364 ipr_log_ext_vpd(&array_entry->vpd);
2365 ipr_err("Current Location: %s\n",
2366 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2367 buffer, sizeof(buffer)));
2368 ipr_err("Expected Location: %s\n",
2369 ipr_format_res_path(ioa_cfg,
2370 array_entry->expected_res_path,
2371 buffer, sizeof(buffer)));
2378 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2379 * @ioa_cfg: ioa config struct
2380 * @hostrcb: hostrcb struct
2385 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2386 struct ipr_hostrcb *hostrcb)
2388 struct ipr_hostrcb_type_30_error *error;
2389 struct ipr_hostrcb64_fabric_desc *fabric;
2390 struct ipr_hostrcb64_config_element *cfg;
2393 error = &hostrcb->hcam.u.error64.u.type_30_error;
2395 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2396 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2398 add_len = be32_to_cpu(hostrcb->hcam.length) -
2399 (offsetof(struct ipr_hostrcb64_error, u) +
2400 offsetof(struct ipr_hostrcb_type_30_error, desc));
2402 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2403 ipr_log64_fabric_path(hostrcb, fabric);
2404 for_each_fabric_cfg(fabric, cfg)
2405 ipr_log64_path_elem(hostrcb, cfg);
2407 add_len -= be16_to_cpu(fabric->length);
2408 fabric = (struct ipr_hostrcb64_fabric_desc *)
2409 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2412 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2416 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2417 * @ioa_cfg: ioa config struct
2418 * @hostrcb: hostrcb struct
2423 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2424 struct ipr_hostrcb *hostrcb)
2426 struct ipr_hostrcb_type_41_error *error;
2428 error = &hostrcb->hcam.u.error64.u.type_41_error;
2430 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2431 ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2432 ipr_log_hex_data(ioa_cfg, error->data,
2433 be32_to_cpu(hostrcb->hcam.length) -
2434 (offsetof(struct ipr_hostrcb_error, u) +
2435 offsetof(struct ipr_hostrcb_type_41_error, data)));
2438 * ipr_log_generic_error - Log an adapter error.
2439 * @ioa_cfg: ioa config struct
2440 * @hostrcb: hostrcb struct
2445 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2446 struct ipr_hostrcb *hostrcb)
2448 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2449 be32_to_cpu(hostrcb->hcam.length));
2453 * ipr_log_sis64_device_error - Log a cache error.
2454 * @ioa_cfg: ioa config struct
2455 * @hostrcb: hostrcb struct
2460 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2461 struct ipr_hostrcb *hostrcb)
2463 struct ipr_hostrcb_type_21_error *error;
2464 char buffer[IPR_MAX_RES_PATH_LENGTH];
2466 error = &hostrcb->hcam.u.error64.u.type_21_error;
2468 ipr_err("-----Failing Device Information-----\n");
2469 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2470 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2471 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2472 ipr_err("Device Resource Path: %s\n",
2473 __ipr_format_res_path(error->res_path,
2474 buffer, sizeof(buffer)));
2475 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2476 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2477 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2478 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2479 ipr_err("SCSI Sense Data:\n");
2480 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2481 ipr_err("SCSI Command Descriptor Block: \n");
2482 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2484 ipr_err("Additional IOA Data:\n");
2485 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2489 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2492 * This function will return the index of into the ipr_error_table
2493 * for the specified IOASC. If the IOASC is not in the table,
2494 * 0 will be returned, which points to the entry used for unknown errors.
2497 * index into the ipr_error_table
2499 static u32 ipr_get_error(u32 ioasc)
2503 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2504 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2511 * ipr_handle_log_data - Log an adapter error.
2512 * @ioa_cfg: ioa config struct
2513 * @hostrcb: hostrcb struct
2515 * This function logs an adapter error to the system.
2520 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2521 struct ipr_hostrcb *hostrcb)
2525 struct ipr_hostrcb_type_21_error *error;
2527 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2530 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2531 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2534 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2536 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2538 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2539 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2540 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2541 scsi_report_bus_reset(ioa_cfg->host,
2542 hostrcb->hcam.u.error.fd_res_addr.bus);
2545 error_index = ipr_get_error(ioasc);
2547 if (!ipr_error_table[error_index].log_hcam)
2550 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2551 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2552 error = &hostrcb->hcam.u.error64.u.type_21_error;
2554 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2555 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2559 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2561 /* Set indication we have logged an error */
2562 ioa_cfg->errors_logged++;
2564 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2566 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2567 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2569 switch (hostrcb->hcam.overlay_id) {
2570 case IPR_HOST_RCB_OVERLAY_ID_2:
2571 ipr_log_cache_error(ioa_cfg, hostrcb);
2573 case IPR_HOST_RCB_OVERLAY_ID_3:
2574 ipr_log_config_error(ioa_cfg, hostrcb);
2576 case IPR_HOST_RCB_OVERLAY_ID_4:
2577 case IPR_HOST_RCB_OVERLAY_ID_6:
2578 ipr_log_array_error(ioa_cfg, hostrcb);
2580 case IPR_HOST_RCB_OVERLAY_ID_7:
2581 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2583 case IPR_HOST_RCB_OVERLAY_ID_12:
2584 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2586 case IPR_HOST_RCB_OVERLAY_ID_13:
2587 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2589 case IPR_HOST_RCB_OVERLAY_ID_14:
2590 case IPR_HOST_RCB_OVERLAY_ID_16:
2591 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2593 case IPR_HOST_RCB_OVERLAY_ID_17:
2594 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2596 case IPR_HOST_RCB_OVERLAY_ID_20:
2597 ipr_log_fabric_error(ioa_cfg, hostrcb);
2599 case IPR_HOST_RCB_OVERLAY_ID_21:
2600 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2602 case IPR_HOST_RCB_OVERLAY_ID_23:
2603 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2605 case IPR_HOST_RCB_OVERLAY_ID_24:
2606 case IPR_HOST_RCB_OVERLAY_ID_26:
2607 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2609 case IPR_HOST_RCB_OVERLAY_ID_30:
2610 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2612 case IPR_HOST_RCB_OVERLAY_ID_41:
2613 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2615 case IPR_HOST_RCB_OVERLAY_ID_1:
2616 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2618 ipr_log_generic_error(ioa_cfg, hostrcb);
2623 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2625 struct ipr_hostrcb *hostrcb;
2627 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2628 struct ipr_hostrcb, queue);
2630 if (unlikely(!hostrcb)) {
2631 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2632 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2633 struct ipr_hostrcb, queue);
2636 list_del_init(&hostrcb->queue);
2641 * ipr_process_error - Op done function for an adapter error log.
2642 * @ipr_cmd: ipr command struct
2644 * This function is the op done function for an error log host
2645 * controlled async from the adapter. It will log the error and
2646 * send the HCAM back to the adapter.
2651 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2653 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2654 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2655 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2659 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2661 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2663 list_del_init(&hostrcb->queue);
2664 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2667 ipr_handle_log_data(ioa_cfg, hostrcb);
2668 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2669 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2670 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2671 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2672 dev_err(&ioa_cfg->pdev->dev,
2673 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2676 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2677 schedule_work(&ioa_cfg->work_q);
2678 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2680 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2684 * ipr_timeout - An internally generated op has timed out.
2685 * @ipr_cmd: ipr command struct
2687 * This function blocks host requests and initiates an
2693 static void ipr_timeout(struct timer_list *t)
2695 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2696 unsigned long lock_flags = 0;
2697 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2700 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2702 ioa_cfg->errors_logged++;
2703 dev_err(&ioa_cfg->pdev->dev,
2704 "Adapter being reset due to command timeout.\n");
2706 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2707 ioa_cfg->sdt_state = GET_DUMP;
2709 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2710 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2712 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2717 * ipr_oper_timeout - Adapter timed out transitioning to operational
2718 * @ipr_cmd: ipr command struct
2720 * This function blocks host requests and initiates an
2726 static void ipr_oper_timeout(struct timer_list *t)
2728 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2729 unsigned long lock_flags = 0;
2730 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2733 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2735 ioa_cfg->errors_logged++;
2736 dev_err(&ioa_cfg->pdev->dev,
2737 "Adapter timed out transitioning to operational.\n");
2739 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2740 ioa_cfg->sdt_state = GET_DUMP;
2742 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2744 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2745 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2748 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2753 * ipr_find_ses_entry - Find matching SES in SES table
2754 * @res: resource entry struct of SES
2757 * pointer to SES table entry / NULL on failure
2759 static const struct ipr_ses_table_entry *
2760 ipr_find_ses_entry(struct ipr_resource_entry *res)
2763 struct ipr_std_inq_vpids *vpids;
2764 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2766 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2767 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2768 if (ste->compare_product_id_byte[j] == 'X') {
2769 vpids = &res->std_inq_data.vpids;
2770 if (vpids->product_id[j] == ste->product_id[j])
2778 if (matches == IPR_PROD_ID_LEN)
2786 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2787 * @ioa_cfg: ioa config struct
2789 * @bus_width: bus width
2792 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2793 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2794 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2795 * max 160MHz = max 320MB/sec).
2797 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2799 struct ipr_resource_entry *res;
2800 const struct ipr_ses_table_entry *ste;
2801 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2803 /* Loop through each config table entry in the config table buffer */
2804 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2805 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2808 if (bus != res->bus)
2811 if (!(ste = ipr_find_ses_entry(res)))
2814 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2817 return max_xfer_rate;
2821 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2822 * @ioa_cfg: ioa config struct
2823 * @max_delay: max delay in micro-seconds to wait
2825 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2828 * 0 on success / other on failure
2830 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2832 volatile u32 pcii_reg;
2835 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2836 while (delay < max_delay) {
2837 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2839 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2842 /* udelay cannot be used if delay is more than a few milliseconds */
2843 if ((delay / 1000) > MAX_UDELAY_MS)
2844 mdelay(delay / 1000);
2854 * ipr_get_sis64_dump_data_section - Dump IOA memory
2855 * @ioa_cfg: ioa config struct
2856 * @start_addr: adapter address to dump
2857 * @dest: destination kernel buffer
2858 * @length_in_words: length to dump in 4 byte words
2863 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2865 __be32 *dest, u32 length_in_words)
2869 for (i = 0; i < length_in_words; i++) {
2870 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2871 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2879 * ipr_get_ldump_data_section - Dump IOA memory
2880 * @ioa_cfg: ioa config struct
2881 * @start_addr: adapter address to dump
2882 * @dest: destination kernel buffer
2883 * @length_in_words: length to dump in 4 byte words
2886 * 0 on success / -EIO on failure
2888 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2890 __be32 *dest, u32 length_in_words)
2892 volatile u32 temp_pcii_reg;
2896 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2897 dest, length_in_words);
2899 /* Write IOA interrupt reg starting LDUMP state */
2900 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2901 ioa_cfg->regs.set_uproc_interrupt_reg32);
2903 /* Wait for IO debug acknowledge */
2904 if (ipr_wait_iodbg_ack(ioa_cfg,
2905 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2906 dev_err(&ioa_cfg->pdev->dev,
2907 "IOA dump long data transfer timeout\n");
2911 /* Signal LDUMP interlocked - clear IO debug ack */
2912 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2913 ioa_cfg->regs.clr_interrupt_reg);
2915 /* Write Mailbox with starting address */
2916 writel(start_addr, ioa_cfg->ioa_mailbox);
2918 /* Signal address valid - clear IOA Reset alert */
2919 writel(IPR_UPROCI_RESET_ALERT,
2920 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2922 for (i = 0; i < length_in_words; i++) {
2923 /* Wait for IO debug acknowledge */
2924 if (ipr_wait_iodbg_ack(ioa_cfg,
2925 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2926 dev_err(&ioa_cfg->pdev->dev,
2927 "IOA dump short data transfer timeout\n");
2931 /* Read data from mailbox and increment destination pointer */
2932 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2935 /* For all but the last word of data, signal data received */
2936 if (i < (length_in_words - 1)) {
2937 /* Signal dump data received - Clear IO debug Ack */
2938 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2939 ioa_cfg->regs.clr_interrupt_reg);
2943 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2944 writel(IPR_UPROCI_RESET_ALERT,
2945 ioa_cfg->regs.set_uproc_interrupt_reg32);
2947 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2948 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2950 /* Signal dump data received - Clear IO debug Ack */
2951 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2952 ioa_cfg->regs.clr_interrupt_reg);
2954 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2955 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2957 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2959 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2969 #ifdef CONFIG_SCSI_IPR_DUMP
2971 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2972 * @ioa_cfg: ioa config struct
2973 * @pci_address: adapter address
2974 * @length: length of data to copy
2976 * Copy data from PCI adapter to kernel buffer.
2977 * Note: length MUST be a 4 byte multiple
2979 * 0 on success / other on failure
2981 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2982 unsigned long pci_address, u32 length)
2984 int bytes_copied = 0;
2985 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2987 unsigned long lock_flags = 0;
2988 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2991 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2993 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2995 while (bytes_copied < length &&
2996 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2997 if (ioa_dump->page_offset >= PAGE_SIZE ||
2998 ioa_dump->page_offset == 0) {
2999 page = (__be32 *)__get_free_page(GFP_ATOMIC);
3003 return bytes_copied;
3006 ioa_dump->page_offset = 0;
3007 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
3008 ioa_dump->next_page_index++;
3010 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
3012 rem_len = length - bytes_copied;
3013 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
3014 cur_len = min(rem_len, rem_page_len);
3016 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3017 if (ioa_cfg->sdt_state == ABORT_DUMP) {
3020 rc = ipr_get_ldump_data_section(ioa_cfg,
3021 pci_address + bytes_copied,
3022 &page[ioa_dump->page_offset / 4],
3023 (cur_len / sizeof(u32)));
3025 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3028 ioa_dump->page_offset += cur_len;
3029 bytes_copied += cur_len;
3037 return bytes_copied;
3041 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3042 * @hdr: dump entry header struct
3047 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3049 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3051 hdr->offset = sizeof(*hdr);
3052 hdr->status = IPR_DUMP_STATUS_SUCCESS;
3056 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3057 * @ioa_cfg: ioa config struct
3058 * @driver_dump: driver dump struct
3063 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3064 struct ipr_driver_dump *driver_dump)
3066 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3068 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3069 driver_dump->ioa_type_entry.hdr.len =
3070 sizeof(struct ipr_dump_ioa_type_entry) -
3071 sizeof(struct ipr_dump_entry_header);
3072 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3073 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3074 driver_dump->ioa_type_entry.type = ioa_cfg->type;
3075 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3076 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3077 ucode_vpd->minor_release[1];
3078 driver_dump->hdr.num_entries++;
3082 * ipr_dump_version_data - Fill in the driver version in the dump.
3083 * @ioa_cfg: ioa config struct
3084 * @driver_dump: driver dump struct
3089 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3090 struct ipr_driver_dump *driver_dump)
3092 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3093 driver_dump->version_entry.hdr.len =
3094 sizeof(struct ipr_dump_version_entry) -
3095 sizeof(struct ipr_dump_entry_header);
3096 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3097 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3098 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3099 driver_dump->hdr.num_entries++;
3103 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3104 * @ioa_cfg: ioa config struct
3105 * @driver_dump: driver dump struct
3110 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3111 struct ipr_driver_dump *driver_dump)
3113 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3114 driver_dump->trace_entry.hdr.len =
3115 sizeof(struct ipr_dump_trace_entry) -
3116 sizeof(struct ipr_dump_entry_header);
3117 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3118 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3119 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3120 driver_dump->hdr.num_entries++;
3124 * ipr_dump_location_data - Fill in the IOA location in the dump.
3125 * @ioa_cfg: ioa config struct
3126 * @driver_dump: driver dump struct
3131 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3132 struct ipr_driver_dump *driver_dump)
3134 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3135 driver_dump->location_entry.hdr.len =
3136 sizeof(struct ipr_dump_location_entry) -
3137 sizeof(struct ipr_dump_entry_header);
3138 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3139 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3140 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3141 driver_dump->hdr.num_entries++;
3145 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3146 * @ioa_cfg: ioa config struct
3147 * @dump: dump struct
3152 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3154 unsigned long start_addr, sdt_word;
3155 unsigned long lock_flags = 0;
3156 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3157 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3158 u32 num_entries, max_num_entries, start_off, end_off;
3159 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3160 struct ipr_sdt *sdt;
3166 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3168 if (ioa_cfg->sdt_state != READ_DUMP) {
3169 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3173 if (ioa_cfg->sis64) {
3174 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3175 ssleep(IPR_DUMP_DELAY_SECONDS);
3176 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3179 start_addr = readl(ioa_cfg->ioa_mailbox);
3181 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3182 dev_err(&ioa_cfg->pdev->dev,
3183 "Invalid dump table format: %lx\n", start_addr);
3184 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3188 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3190 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3192 /* Initialize the overall dump header */
3193 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3194 driver_dump->hdr.num_entries = 1;
3195 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3196 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3197 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3198 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3200 ipr_dump_version_data(ioa_cfg, driver_dump);
3201 ipr_dump_location_data(ioa_cfg, driver_dump);
3202 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3203 ipr_dump_trace_data(ioa_cfg, driver_dump);
3205 /* Update dump_header */
3206 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3208 /* IOA Dump entry */
3209 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3210 ioa_dump->hdr.len = 0;
3211 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3212 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3214 /* First entries in sdt are actually a list of dump addresses and
3215 lengths to gather the real dump data. sdt represents the pointer
3216 to the ioa generated dump table. Dump data will be extracted based
3217 on entries in this table */
3218 sdt = &ioa_dump->sdt;
3220 if (ioa_cfg->sis64) {
3221 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3222 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3224 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3225 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3228 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3229 (max_num_entries * sizeof(struct ipr_sdt_entry));
3230 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3231 bytes_to_copy / sizeof(__be32));
3233 /* Smart Dump table is ready to use and the first entry is valid */
3234 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3235 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3236 dev_err(&ioa_cfg->pdev->dev,
3237 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3238 rc, be32_to_cpu(sdt->hdr.state));
3239 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3240 ioa_cfg->sdt_state = DUMP_OBTAINED;
3241 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3245 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3247 if (num_entries > max_num_entries)
3248 num_entries = max_num_entries;
3250 /* Update dump length to the actual data to be copied */
3251 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3253 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3255 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3257 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3259 for (i = 0; i < num_entries; i++) {
3260 if (ioa_dump->hdr.len > max_dump_size) {
3261 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3265 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3266 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3268 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3270 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3271 end_off = be32_to_cpu(sdt->entry[i].end_token);
3273 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3274 bytes_to_copy = end_off - start_off;
3279 if (bytes_to_copy > max_dump_size) {
3280 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3284 /* Copy data from adapter to driver buffers */
3285 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3288 ioa_dump->hdr.len += bytes_copied;
3290 if (bytes_copied != bytes_to_copy) {
3291 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3298 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3300 /* Update dump_header */
3301 driver_dump->hdr.len += ioa_dump->hdr.len;
3303 ioa_cfg->sdt_state = DUMP_OBTAINED;
/* Dump support compiled out: taking an IOA dump becomes a no-op. */
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * Final kref release handler for an adapter dump. Detaches the dump
 * from the adapter config under the host lock, then frees every
 * captured IOA data page and the page-pointer array.
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;

	/* Unpublish the dump and mark the smart-dump state machine idle
	 * so a new dump can be initiated later. */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* Free each captured dump page, then the vmalloc'ed pointer array. */
	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	vfree(dump->ioa_dump.ioa_data);
/*
 * ipr_add_remove_thread - work handler that syncs the driver's resource
 * list with the SCSI mid-layer: removes devices flagged del_from_ml and
 * adds devices flagged add_to_ml, then emits a change uevent.
 *
 * The host lock must be dropped around scsi_remove_device()/
 * scsi_add_device(), which may sleep; the used_res_q walk therefore
 * restarts after each reacquisition.
 */
static void ipr_add_remove_thread(struct work_struct *work)
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
	u8 bus, target, lun;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Adapter not accepting commands (e.g. mid-reset): bail out. */
	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* Pass 1: remove devices the adapter reported as gone. */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->del_from_ml && res->sdev) {
			/* Hold a reference so sdev stays valid while the
			 * host lock is released for the removal. */
			if (!scsi_device_get(sdev)) {
				if (!res->add_to_ml)
					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
				res->del_from_ml = 0;
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				scsi_remove_device(sdev);
				scsi_device_put(sdev);
				spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Pass 2: surface newly discovered devices to the mid-layer. */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			target = res->target;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->scan_done = 1;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Notify userspace that the device configuration changed. */
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
/**
 * ipr_worker_thread - Worker thread
 * @work:	ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing device from the mid-layer as configuration
 * changes are detected by the adapter. It also handles reading an IOA
 * dump when one is pending and unblocking SCSI requests after a reset.
 *
 * Return value:
 *	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
	unsigned long lock_flags;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* A dump was requested: fetch it with the lock dropped (the copy
	 * from the adapter can take a long time), holding a kref so the
	 * dump cannot be freed underneath us. */
	if (ioa_cfg->sdt_state == READ_DUMP) {
		dump = ioa_cfg->dump;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		/* Dump obtained without timing out: reset the adapter now. */
		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* A reset finished: unblock mid-layer requests. scsi_blocked may be
	 * set again while the lock was dropped, so re-check it after. */
	if (ioa_cfg->scsi_unblock) {
		ioa_cfg->scsi_unblock = 0;
		ioa_cfg->scsi_blocked = 0;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		scsi_unblock_requests(ioa_cfg->host);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->scsi_blocked)
			scsi_block_requests(ioa_cfg->host);

	if (!ioa_cfg->scan_enabled) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* Defer mid-layer add/remove to the dedicated work item. */
	schedule_work(&ioa_cfg->scsi_add_work_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset into the trace buffer
 * @count:	buffer size
 *
 * sysfs binary read handler: copies a slice of the driver's in-memory
 * command trace into @buf under the host lock.
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	/* Lock out trace writers while copying out the snapshot. */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/* sysfs binary attribute exposing the adapter trace buffer. */
static struct bin_attribute ipr_trace_attr = {
	.read = ipr_read_trace,
/**
 * ipr_show_fw_version - Show the firmware version
 * @dev:	class device struct
 * @attr:	device attribute
 * @buf:	buffer
 *
 * Formats the four microcode VPD revision bytes as an 8-hex-digit string.
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/* Read-only sysfs attribute: adapter firmware version. */
static struct device_attribute ipr_fw_version_attr = {
		.name =		"fw_version",
	.show = ipr_show_fw_version,
/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @dev:	class device struct
 * @attr:	device attribute
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct device *dev,
				  struct device_attribute *attr, char *buf)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @attr:	device attribute
 * @buf:	buffer containing the new level as a decimal string
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_store_log_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* NOTE(review): simple_strtoul() silently ignores trailing garbage
	 * and is deprecated in favor of kstrtouint(); behavior kept as-is. */
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/* Read/write sysfs attribute: adapter error logging level. */
static struct device_attribute ipr_log_level_attr = {
		.name =		"log_level",
		.mode =		S_IRUGO | S_IWUSR,
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:	device struct
 * @attr:	device attribute
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	/* Resetting the adapter is a privileged operation. */
	if (!capable(CAP_SYS_ADMIN))

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Wait out any reset already in flight before starting ours. */
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Clear the error counter so we only see errors logged by the
	 * diagnostic reset we are about to trigger. */
	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* A new reset or any logged error means diagnostics failed. */
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/* Write-only sysfs attribute: trigger adapter diagnostics. */
static struct device_attribute ipr_diagnostics_attr = {
		.name =		"run_diagnostics",
	.store = ipr_store_diagnostics
/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	device struct
 * @attr:	device attribute
 * @buf:	buffer
 *
 * Reports "offline" when the primary HRRQ marks the IOA dead,
 * "online" otherwise.
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct device *dev,
				      struct device_attribute *attr, char *buf)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_store_adapter_state - Change adapter state
 * @dev:	device struct
 * @attr:	device attribute
 * @buf:	buffer ("online" revives a dead adapter)
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count, i;

	if (!capable(CAP_SYS_ADMIN))

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Bringing a dead adapter back online: clear the dead flag on every
	 * HRRQ (each has its own lock), then kick off a full reset. */
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
	    !strncmp(buf, "online", 6)) {
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);

		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

/* Read/write sysfs attribute: adapter online/offline state. */
static struct device_attribute ipr_ioa_state_attr = {
		.name =		"online_state",
		.mode =		S_IRUGO | S_IWUSR,
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev:	device struct
 * @attr:	device attribute
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait for the reset/reload
 * to finish before returning.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;

	if (!capable(CAP_SYS_ADMIN))

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Only start a reset if one is not already running. */
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

/* Write-only sysfs attribute: reset the adapter. */
static struct device_attribute ipr_ioa_reset_attr = {
		.name =		"reset_host",
	.store = ipr_store_reset_adapter
static int ipr_iopoll(struct irq_poll *iop, int budget);
/**
 * ipr_show_iopoll_weight - Show ipr polling mode
 * @dev:	class device struct
 * @attr:	device attribute
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_iopoll_weight(struct device *dev,
				      struct device_attribute *attr, char *buf)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

/**
 * ipr_store_iopoll_weight - Change the adapter's polling mode
 * @dev:	class device struct
 * @attr:	device attribute
 * @buf:	buffer holding the new weight (0 disables irq_poll)
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_store_iopoll_weight(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long user_iopoll_weight;
	unsigned long lock_flags = 0;

	/* irq_poll is only implemented for 64-bit SIS adapters. */
	if (!ioa_cfg->sis64) {
		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
	if (kstrtoul(buf, 10, &user_iopoll_weight))

	if (user_iopoll_weight > 256) {
		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");

	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
		dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");

	/* Tear down existing pollers (skipping HRRQ 0, which stays in
	 * interrupt mode) before changing the weight. */
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);

	spin_lock_irqsave(shost->host_lock, lock_flags);
	ioa_cfg->iopoll_weight = user_iopoll_weight;
	/* Re-create the pollers with the new weight if polling is enabled. */
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

/* Read/write sysfs attribute: irq_poll weight. */
static struct device_attribute ipr_iopoll_weight_attr = {
		.name =		"iopoll_weight",
		.mode =		S_IRUGO | S_IWUSR,
	.show = ipr_show_iopoll_weight,
	.store = ipr_store_iopoll_weight
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:		buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 *	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
	struct ipr_sglist *sglist;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
	if (sglist == NULL) {

	sglist->order = order;
	/* sgl_alloc_order splits buf_len across page-order-sized chunks. */
	sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
	if (!sglist->scatterlist) {
/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:		scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
	sgl_free_order(sglist->scatterlist, sglist->order);
/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:		scatter/gather list pointer
 * @buffer:		buffer pointer
 * @len:		buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	/* Copy each full element's worth of data into its SG page. */
	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		memcpy(kaddr, buffer, bsize_elem);

		scatterlist[i].length = bsize_elem;

	/* Copy the final partial element, if any. */
	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		memcpy(kaddr, buffer, len % bsize_elem);

		scatterlist[i].length = len % bsize_elem;

	sglist->buffer_len = len;
/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:		scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL) using 64-bit
 * descriptors: one write-flagged entry per mapped SG element, with
 * the last entry marked IPR_IOADL_FLAGS_LAST.
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	/* One descriptor per DMA-mapped scatter/gather element. */
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));

	/* Flag the final descriptor so the IOA knows the list ends here. */
	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:		scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL) using 32-bit
 * descriptors; the legacy counterpart of ipr_build_ucode_ioadl64.
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	/* 32-bit descriptors pack the write flag and length together. */
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
			cpu_to_be32(sg_dma_address(&scatterlist[i]));

	/* Mark the final descriptor as the end of the list. */
	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:		scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode. The mapped
 * sglist is published via ioa_cfg->ucode_sglist for the reset path to
 * download, and cleared again once the reset/reload completes.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Wait for any in-flight reset to finish first. */
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Only one download can be staged at a time. */
	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");

	/* Map the image for DMA to the adapter. */
	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
					sglist->scatterlist, sglist->num_sg,

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");

	/* The reset sequence performs the actual download. */
	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @attr:	device attribute
 * @buf:	buffer holding the firmware file name
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter: it loads the
 * named image via request_firmware(), copies it into a DMA-able SG
 * buffer, and triggers a reset-based download.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	int result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))

	/* Copy the name locally and strip the sysfs-supplied newline. */
	snprintf(fname, sizeof(fname), "%s", buf);

	endline = strchr(fname, '\n');

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	/* The downloadable payload starts after the image header. */
	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");

	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);

/* Write-only sysfs attribute: download new adapter firmware. */
static struct device_attribute ipr_update_fw_attr = {
		.name =		"update_fw",
	.store = ipr_store_update_fw
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @attr:	device attribute
 * @buf:	buffer
 *
 * Prints 1 for 64-bit SIS adapters, 0 otherwise.
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/* Read-only sysfs attribute: adapter firmware interface type. */
static struct device_attribute ipr_ioa_fw_type_attr = {
	.show = ipr_show_fw_type
/*
 * ipr_read_async_err_log - sysfs binary read handler returning the raw
 * HCAM of the oldest pending host-reported error, or nothing if the
 * report queue is empty. The entry is left queued; writing to the
 * attribute (ipr_next_async_err_log) consumes it.
 */
static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
				      struct bin_attribute *bin_attr, char *buf,
				      loff_t off, size_t count)
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_hostrcb *hostrcb;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					   struct ipr_hostrcb, queue);
	/* Empty queue: nothing to report. */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
				      sizeof(hostrcb->hcam));
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * ipr_next_async_err_log - sysfs binary write handler that acknowledges
 * the oldest pending error report, returning its hostrcb to the free
 * queue so the next report becomes readable.
 */
static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
				      struct bin_attribute *bin_attr, char *buf,
				      loff_t off, size_t count)
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_hostrcb *hostrcb;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					   struct ipr_hostrcb, queue);
	/* Nothing queued: nothing to acknowledge. */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* Reclaim hostrcb before exit */
	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/* sysfs binary attribute: read shows oldest async error, write acks it. */
static struct bin_attribute ipr_ioa_async_err_log = {
		.name =		"async_err_log",
		.mode =		S_IRUGO | S_IWUSR,
	.read = ipr_read_async_err_log,
	.write = ipr_next_async_err_log
/* Per-host sysfs attributes registered with the SCSI mid-layer. */
static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	&ipr_iopoll_weight_attr,
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset into the dump image
 * @count:		buffer size
 *
 * sysfs binary read handler for the dump image. The image is a virtual
 * concatenation of three regions: the driver_dump header struct, the
 * smart dump table inside ioa_dump, and the page array of captured IOA
 * data; @off is translated through each region in turn.
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	if (!capable(CAP_SYS_ADMIN))

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	/* No completed dump available to read. */
	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Pin the dump so it cannot be freed while we copy without the lock. */
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);

	/* Clamp the read to the end of the dump image. */
	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;

	/* Region 1: the driver dump header structure. */
	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);

	off -= sizeof(dump->driver_dump);

	/* Region 2 end: SDT header plus used (FMT3) or max (FMT2) entries. */
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);

	/* Region 3: captured IOA data, one page at a time. */
	if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
		len = PAGE_ALIGN(off) - off;

	src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
	src += off & ~PAGE_MASK;
	memcpy(buf, src, len);

	kref_put(&dump->kref, ipr_release_dump);
/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Allocates the dump struct and the page-pointer array (FMT3 or FMT2
 * sized), then arms the smart-dump state machine. If the adapter is
 * already dead, the worker thread is scheduled to take the dump now.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
		ipr_err("Dump memory allocation failed\n");

	/* Pointer-array size depends on the adapter's SDT format. */
		ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
		ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,

		ipr_err("Dump memory allocation failed\n");

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Another dump is active or pending: discard ours. */
	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	/* Adapter already dead: take the dump immediately via the worker. */
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Drops the ioa_cfg's reference to the current dump; the memory is
 * actually released by ipr_release_dump once the last kref is put.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	/* No dump to free. */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);
/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer: '1' requests a dump, '0' frees the current one
 * @off:		offset (unused)
 * @count:		buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	if (!capable(CAP_SYS_ADMIN))

		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);

/* sysfs binary attribute: read the dump image, write to arm/free it. */
static struct bin_attribute ipr_dump_attr = {
		.mode =		S_IRUSR | S_IWUSR,
	.read = ipr_read_dump,
	.write = ipr_write_dump
/* Stub used when CONFIG_SCSI_IPR_DUMP is not set: freeing a dump is a
 * no-op that always succeeds. (Dropped the stray trailing semicolon:
 * it formed an empty top-level declaration, a -Wpedantic warning.) */
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Caps the requested depth for ATA LUNs, applies it via the mid-layer,
 * and reports the depth actually in effect.
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	/* ATA devices cannot queue more than IPR_MAX_CMD_PER_ATA_LUN. */
	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	/* NOTE(review): res_handle is printed as-is with no byte-order
	 * conversion — presumably it is kept in wire order; confirm
	 * against the resource-table definition. */
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/* Read-only per-device sysfs attribute: adapter resource handle. */
static struct device_attribute ipr_adapter_handle_attr = {
		.name = 	"adapter_handle",
	.show = ipr_show_adapter_handle
/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * On sis64 adapters prints the formatted resource path; otherwise prints
 * the classic host:bus:target:lun address.
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/* Read-only per-device sysfs attribute: resource path/address. */
static struct device_attribute ipr_resource_path_attr = {
		.name = 	"resource_path",
	.show = ipr_show_resource_path
4592 * ipr_show_device_id - Show the device_id for this device.
4593 * @dev: device struct
4594 * @attr: device attribute structure
 * @buf: output buffer (at most PAGE_SIZE bytes)
 *
 * Return value:
4598 * number of bytes printed to buffer
4600 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4602 struct scsi_device *sdev = to_scsi_device(dev);
4603 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4604 struct ipr_resource_entry *res;
4605 unsigned long lock_flags = 0;
4606 ssize_t len = -ENXIO;
4608 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4609 res = (struct ipr_resource_entry *)sdev->hostdata;
/* SIS64 keeps dev_id big-endian in the config table; swap before printing */
4610 if (res && ioa_cfg->sis64)
4611 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
/* NOTE(review): else-branch guard elided in this extract; lun_wwn path presumably taken when !sis64 — confirm */
4613 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4615 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs attribute: read-only "device_id" file under the scsi device */
4619 static struct device_attribute ipr_device_id_attr = {
4621 .name = "device_id",
4624 .show = ipr_show_device_id
4628 * ipr_show_resource_type - Show the resource type for this device.
4629 * @dev: device struct
4630 * @attr: device attribute structure
 * @buf: output buffer (at most PAGE_SIZE bytes)
 *
 * Return value:
4634 * number of bytes printed to buffer
4636 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4638 struct scsi_device *sdev = to_scsi_device(dev);
4639 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4640 struct ipr_resource_entry *res;
4641 unsigned long lock_flags = 0;
4642 ssize_t len = -ENXIO;
4644 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4645 res = (struct ipr_resource_entry *)sdev->hostdata;
/* NOTE(review): guard on res is on an elided line — confirm against full source */
4648 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4650 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs attribute: read-only "resource_type" file under the scsi device */
4654 static struct device_attribute ipr_resource_type_attr = {
4656 .name = "resource_type",
4659 .show = ipr_show_resource_type
4663 * ipr_show_raw_mode - Show the adapter's raw mode
4664 * @dev: class device struct
 * @attr: device attribute structure
 * @buf: output buffer (at most PAGE_SIZE bytes)
 *
 * Return value:
4668 * number of bytes printed to buffer
4670 static ssize_t ipr_show_raw_mode(struct device *dev,
4671 struct device_attribute *attr, char *buf)
4673 struct scsi_device *sdev = to_scsi_device(dev);
4674 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4675 struct ipr_resource_entry *res;
4676 unsigned long lock_flags = 0;
/* NOTE(review): declaration of len is on an elided line in this extract */
4679 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4680 res = (struct ipr_resource_entry *)sdev->hostdata;
4682 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4685 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4690 * ipr_store_raw_mode - Change the adapter's raw mode
4691 * @dev: class device struct
 * @attr: device attribute structure
 * @buf: user-supplied value ("0"/"1", parsed as base-10)
 * @count: length of @buf
 *
 * Return value:
4695 * number of bytes printed to buffer
4697 static ssize_t ipr_store_raw_mode(struct device *dev,
4698 struct device_attribute *attr,
4699 const char *buf, size_t count)
4701 struct scsi_device *sdev = to_scsi_device(dev);
4702 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4703 struct ipr_resource_entry *res;
4704 unsigned long lock_flags = 0;
4707 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4708 res = (struct ipr_resource_entry *)sdev->hostdata;
/* raw mode only applies to AF DASD resources */
4710 if (ipr_is_af_dasd_device(res)) {
4711 res->raw_mode = simple_strtoul(buf, NULL, 10);
4714 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4715 res->raw_mode ? "enabled" : "disabled");
4720 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs attribute: read/write "raw_mode" file (root-writable) */
4724 static struct device_attribute ipr_raw_mode_attr = {
4727 .mode = S_IRUGO | S_IWUSR,
4729 .show = ipr_show_raw_mode,
4730 .store = ipr_store_raw_mode
/* Per-device sysfs attributes registered via the scsi_host_template. */
4733 static struct device_attribute *ipr_dev_attrs[] = {
4734 &ipr_adapter_handle_attr,
4735 &ipr_resource_path_attr,
4736 &ipr_device_id_attr,
4737 &ipr_resource_type_attr,
/* NOTE(review): trailing entries (raw_mode attr, NULL terminator) are on lines elided from this extract — confirm */
4743 * ipr_biosparam - Return the HSC mapping
4744 * @sdev: scsi device struct
4745 * @block_device: block device pointer
4746 * @capacity: capacity of the device
4747 * @parm: Array containing returned HSC values.
4749 * This function generates the HSC parms that fdisk uses.
4750 * We want to make sure we return something that places partitions
4751 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 * 0 on success
4756 static int ipr_biosparam(struct scsi_device *sdev,
4757 struct block_device *block_device,
4758 sector_t capacity, int *parm)
/* 128 heads * 32 sectors = 4096 sectors/cylinder, keeping cylinder
 * boundaries (and thus fdisk partitions) 4k aligned */
4766 cylinders = capacity;
4767 sector_div(cylinders, (128 * 32));
4772 parm[2] = cylinders;
4778 * ipr_find_starget - Find target based on bus/target.
4779 * @starget: scsi target struct
 *
 * Return value:
4782 * resource entry pointer if found / NULL if not found
 *
 * Callers (e.g. ipr_target_alloc/ipr_target_destroy) hold the host_lock
 * while walking used_res_q.
4784 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4786 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4787 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4788 struct ipr_resource_entry *res;
4790 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4791 if ((res->bus == starget->channel) &&
4792 (res->target == starget->id)) {
/* Forward declaration; the port-info initializer appears later in the file. */
4800 static struct ata_port_info sata_port_info;
4803 * ipr_target_alloc - Prepare for commands to a SCSI target
4804 * @starget: scsi target struct
4806 * If the device is a SATA device, this function allocates an
4807 * ATA port with libata, else it does nothing.
 *
 * Return value:
4810 * 0 on success / non-0 on failure
4812 static int ipr_target_alloc(struct scsi_target *starget)
4814 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4815 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4816 struct ipr_sata_port *sata_port;
4817 struct ata_port *ap;
4818 struct ipr_resource_entry *res;
4819 unsigned long lock_flags;
4821 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4822 res = ipr_find_starget(starget);
4823 starget->hostdata = NULL;
4825 if (res && ipr_is_gata(res)) {
/* drop the lock: kzalloc(GFP_KERNEL) and libata allocation may sleep */
4826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4827 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
/* NOTE(review): allocation-failure checks are on lines elided from this extract — confirm */
4831 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4833 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4834 sata_port->ioa_cfg = ioa_cfg;
4836 sata_port->res = res;
/* cross-link resource entry, ata_port and scsi_target */
4838 res->sata_port = sata_port;
4839 ap->private_data = sata_port;
4840 starget->hostdata = sata_port;
4846 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4852 * ipr_target_destroy - Destroy a SCSI target
4853 * @starget: scsi target struct
4855 * If the device was a SATA device, this function frees the libata
4856 * ATA port, else it does nothing.
 *
 * Return value:
 * nothing
4859 static void ipr_target_destroy(struct scsi_target *starget)
4861 struct ipr_sata_port *sata_port = starget->hostdata;
4862 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4863 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
/* SIS64: if the resource is truly gone, release its virtual-bus id bit */
4865 if (ioa_cfg->sis64) {
4866 if (!ipr_find_starget(starget)) {
4867 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4868 clear_bit(starget->id, ioa_cfg->array_ids);
4869 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4870 clear_bit(starget->id, ioa_cfg->vset_ids);
4871 else if (starget->channel == 0)
4872 clear_bit(starget->id, ioa_cfg->target_ids);
/* NOTE(review): the NULL check on sata_port is on an elided line — confirm against full source */
4877 starget->hostdata = NULL;
4878 ata_sas_port_destroy(sata_port->ap);
4884 * ipr_find_sdev - Find device based on bus/target/lun.
4885 * @sdev: scsi device struct
 *
 * Return value:
4888 * resource entry pointer if found / NULL if not found
 *
 * Caller holds the host_lock (see ipr_slave_alloc).
4890 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4892 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4893 struct ipr_resource_entry *res;
4895 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4896 if ((res->bus == sdev->channel) &&
4897 (res->target == sdev->id) &&
4898 (res->lun == sdev->lun))
4906 * ipr_slave_destroy - Unconfigure a SCSI device
4907 * @sdev: scsi device struct
 *
 * Return value:
 * nothing
4912 static void ipr_slave_destroy(struct scsi_device *sdev)
4914 struct ipr_resource_entry *res;
4915 struct ipr_ioa_cfg *ioa_cfg;
4916 unsigned long lock_flags = 0;
4918 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4920 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4921 res = (struct ipr_resource_entry *) sdev->hostdata;
/* mark any attached ATA device as gone before severing the links */
4924 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4925 sdev->hostdata = NULL;
4927 res->sata_port = NULL;
4929 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4933 * ipr_slave_configure - Configure a SCSI device
4934 * @sdev: scsi device struct
4936 * This function configures the specified scsi device.
 *
 * Return value:
 * 0 on success
4941 static int ipr_slave_configure(struct scsi_device *sdev)
4943 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4944 struct ipr_resource_entry *res;
4945 struct ata_port *ap = NULL;
4946 unsigned long lock_flags = 0;
4947 char buffer[IPR_MAX_RES_PATH_LENGTH];
4949 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4950 res = sdev->hostdata;
4952 if (ipr_is_af_dasd_device(res))
4953 sdev->type = TYPE_RAID;
/* AF DASD / IOA resources are managed by this driver: keep upper-layer drivers off them */
4954 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4955 sdev->scsi_level = 4;
4956 sdev->no_uld_attach = 1;
/* volume sets get longer timeouts and a capped transfer size */
4958 if (ipr_is_vset_device(res)) {
4959 sdev->scsi_level = SCSI_SPC_3;
4960 sdev->no_report_opcodes = 1;
4961 blk_queue_rq_timeout(sdev->request_queue,
4962 IPR_VSET_RW_TIMEOUT);
4963 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4965 if (ipr_is_gata(res) && res->sata_port)
4966 ap = res->sata_port->ap;
4967 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* SATA devices are configured through libata's SAS glue */
4970 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4971 ata_sas_slave_configure(sdev, ap);
4975 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4976 ipr_format_res_path(ioa_cfg,
4977 res->res_path, buffer, sizeof(buffer)));
4980 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4985 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4986 * @sdev: scsi device struct
4988 * This function initializes an ATA port so that future commands
4989 * sent through queuecommand will work.
 *
 * Return value:
 * 0 on success
4994 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4996 struct ipr_sata_port *sata_port = NULL;
/* the sata_port was stashed on the scsi_target by ipr_target_alloc */
5000 if (sdev->sdev_target)
5001 sata_port = sdev->sdev_target->hostdata;
5003 rc = ata_sas_port_init(sata_port->ap);
5005 rc = ata_sas_sync_probe(sata_port->ap);
/* NOTE(review): rc checks between these calls are on elided lines; on probe failure the device is torn down */
5009 ipr_slave_destroy(sdev);
5016 * ipr_slave_alloc - Prepare for commands to a device.
5017 * @sdev: scsi device struct
5019 * This function saves a pointer to the resource entry
5020 * in the scsi device struct if the device exists. We
5021 * can then use this pointer in ipr_queuecommand when
5022 * handling new commands.
 *
 * Return value:
5025 * 0 on success / -ENXIO if device does not exist
5027 static int ipr_slave_alloc(struct scsi_device *sdev)
5029 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5030 struct ipr_resource_entry *res;
5031 unsigned long lock_flags;
5034 sdev->hostdata = NULL;
5036 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5038 res = ipr_find_sdev(sdev);
5043 sdev->hostdata = res;
5044 if (!ipr_is_naca_model(res))
5045 res->needs_sync_complete = 1;
/* SATA devices need extra libata setup; lock dropped before the call */
5047 if (ipr_is_gata(res)) {
5048 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5049 return ipr_ata_slave_alloc(sdev);
5053 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5059 * ipr_match_lun - Match function for specified LUN
5060 * @ipr_cmd: ipr command struct
5061 * @device: device to match (sdev)
 *
 * Return value:
5064 * 1 if command matches sdev / 0 if command does not match sdev
5066 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5068 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5074 * ipr_cmnd_is_free - Check if a command is free or not
5075 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * true if the command sits on its hrrq's free queue / false otherwise
5080 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5082 struct ipr_cmnd *loop_cmd;
/* linear scan of the free list; callers hold the hrrq lock */
5084 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5085 if (loop_cmd == ipr_cmd)
5093 * ipr_match_res - Match function for specified resource entry
5094 * @ipr_cmd: ipr command struct
5095 * @resource: resource entry to match
 *
 * Return value:
5098 * 1 if command matches sdev / 0 if command does not match sdev
5100 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5102 struct ipr_resource_entry *res = resource;
/* compare by adapter resource handle rather than sdev pointer */
5104 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5110 * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg: ioa config struct
5112 * @device: device to match (sdev)
5113 * @match: match function to use
 *
 * Return value:
 * SUCCESS / FAILED (FAILED if matching commands did not complete in time)
5118 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5119 int (*match)(struct ipr_cmnd *, void *))
5121 struct ipr_cmnd *ipr_cmd;
5123 unsigned long flags;
5124 struct ipr_hrr_queue *hrrq;
5125 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5126 DECLARE_COMPLETION_ONSTACK(comp);
/* pass 1: attach our completion to every outstanding matching command */
5132 for_each_hrrq(hrrq, ioa_cfg) {
5133 spin_lock_irqsave(hrrq->lock, flags);
5134 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5135 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5136 if (!ipr_cmnd_is_free(ipr_cmd)) {
5137 if (match(ipr_cmd, device)) {
5138 ipr_cmd->eh_comp = &comp;
5143 spin_unlock_irqrestore(hrrq->lock, flags);
/* NOTE(review): the wait loop header is on elided lines; timeout shrinks across iterations */
5147 timeout = wait_for_completion_timeout(&comp, timeout);
/* pass 2 (timeout path): detach the completion and note anything still pending */
5152 for_each_hrrq(hrrq, ioa_cfg) {
5153 spin_lock_irqsave(hrrq->lock, flags);
5154 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5155 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5156 if (!ipr_cmnd_is_free(ipr_cmd)) {
5157 if (match(ipr_cmd, device)) {
5158 ipr_cmd->eh_comp = NULL;
5163 spin_unlock_irqrestore(hrrq->lock, flags);
5167 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5169 return wait ? FAILED : SUCCESS;
/**
 * ipr_eh_host_reset - Reset the host adapter (scsi_host_template eh hook)
 * @cmd: scsi command struct
 *
 * Return value:
 * SUCCESS / FAILED
 */
5178 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5180 struct ipr_ioa_cfg *ioa_cfg;
5181 unsigned long lock_flags = 0;
5185 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5186 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* only initiate a reset if one is not already in flight and the IOA is alive */
5188 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5189 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5190 dev_err(&ioa_cfg->pdev->dev,
5191 "Adapter being reset as a result of error recovery.\n");
5193 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5194 ioa_cfg->sdt_state = GET_DUMP;
/* drop the lock while sleeping for the reset/reload to finish */
5197 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5198 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5199 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5201 /* If we got hit with a host reset while we were already resetting
5202 the adapter for some reason, and the reset failed. */
5203 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5208 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5214 * ipr_device_reset - Reset the device
5215 * @ioa_cfg: ioa config struct
5216 * @res: resource entry struct
5218 * This function issues a device reset to the affected device.
5219 * If the device is a SCSI device, a LUN reset will be sent
5220 * to the device first. If that does not work, a target reset
5221 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
5225 * 0 on success / non-zero on failure
5227 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5228 struct ipr_resource_entry *res)
5230 struct ipr_cmnd *ipr_cmd;
5231 struct ipr_ioarcb *ioarcb;
5232 struct ipr_cmd_pkt *cmd_pkt;
5233 struct ipr_ioarcb_ata_regs *regs;
5237 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5238 ioarcb = &ipr_cmd->ioarcb;
5239 cmd_pkt = &ioarcb->cmd_pkt;
/* the ATA taskfile regs live in different places for SIS64 vs SIS32 */
5241 if (ipr_cmd->ioa_cfg->sis64) {
5242 regs = &ipr_cmd->i.ata_ioadl.regs;
5243 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5245 regs = &ioarcb->u.add_data.u.regs;
5247 ioarcb->res_handle = res->res_handle;
5248 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5249 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
/* SATA: request a PHY reset instead of a SCSI LUN/target reset */
5250 if (ipr_is_gata(res)) {
5251 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5252 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5253 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5256 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5257 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5258 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/* preserve the SATA status area for libata unless the whole IOA reset */
5259 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5260 if (ipr_cmd->ioa_cfg->sis64)
5261 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5262 sizeof(struct ipr_ioasa_gata));
5264 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5265 sizeof(struct ipr_ioasa_gata));
5269 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5273 * ipr_sata_reset - Reset the SATA port
5274 * @link: SATA link to reset
5275 * @classes: class of the attached device
 * @deadline: unused here; part of the libata hardreset prototype
5277 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
5280 * 0 on success / non-zero on failure
5282 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5283 unsigned long deadline)
5285 struct ipr_sata_port *sata_port = link->ap->private_data;
5286 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5287 struct ipr_resource_entry *res;
5288 unsigned long lock_flags = 0;
5289 int rc = -ENXIO, ret;
5292 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* block behind any in-progress adapter reset/reload before touching the device */
5293 while (ioa_cfg->in_reset_reload) {
5294 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5295 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5296 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5299 res = sata_port->res;
5301 rc = ipr_device_reset(ioa_cfg, res);
5302 *classes = res->ata_class;
5303 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* if outstanding ops never drain, escalate to a full adapter reset */
5305 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5306 if (ret != SUCCESS) {
5307 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5308 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5309 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5311 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5314 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5321 * ipr_eh_dev_reset - Reset the device
5322 * @scsi_cmd: scsi command struct
5324 * This function issues a device reset to the affected device.
5325 * A LUN reset will be sent to the device first. If that does
5326 * not work, a target reset will be sent.
 *
 * Called with the host_lock held (see ipr_eh_dev_reset below).
 *
 * Return value:
 * SUCCESS / FAILED
5331 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5333 struct ipr_cmnd *ipr_cmd;
5334 struct ipr_ioa_cfg *ioa_cfg;
5335 struct ipr_resource_entry *res;
5336 struct ata_port *ap;
5338 struct ipr_hrr_queue *hrrq;
5341 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5342 res = scsi_cmd->device->hostdata;
5345 * If we are currently going through reset/reload, return failed. This will force the
5346 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
 * reset to complete
5349 if (ioa_cfg->in_reset_reload)
5351 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
/* mark every outstanding ATA op on this resource failed so libata EH reaps it */
5354 for_each_hrrq(hrrq, ioa_cfg) {
5355 spin_lock(&hrrq->_lock);
5356 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5357 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5359 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5362 if (ipr_cmnd_is_free(ipr_cmd))
5365 ipr_cmd->done = ipr_sata_eh_done;
5366 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5367 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5368 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5372 spin_unlock(&hrrq->_lock);
5374 res->resetting_device = 1;
5375 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
/* SATA: let libata drive the reset; host_lock must be dropped around it */
5377 if (ipr_is_gata(res) && res->sata_port) {
5378 ap = res->sata_port->ap;
5379 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5380 ata_std_error_handler(ap);
5381 spin_lock_irq(scsi_cmd->device->host->host_lock);
5383 rc = ipr_device_reset(ioa_cfg, res);
5384 res->resetting_device = 0;
5385 res->reset_occurred = 1;
5388 return rc ? FAILED : SUCCESS;
/**
 * ipr_eh_dev_reset - Device-reset eh hook; wraps __ipr_eh_dev_reset with
 * locking and waits for affected ops to drain.
 * @cmd: scsi command struct
 *
 * Return value:
 * SUCCESS / FAILED
 */
5391 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5394 struct ipr_ioa_cfg *ioa_cfg;
5395 struct ipr_resource_entry *res;
5397 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5398 res = cmd->device->hostdata;
5403 spin_lock_irq(cmd->device->host->host_lock);
5404 rc = __ipr_eh_dev_reset(cmd);
5405 spin_unlock_irq(cmd->device->host->host_lock);
/* wait by resource handle for SATA, by LUN for plain SCSI devices */
5407 if (rc == SUCCESS) {
5408 if (ipr_is_gata(res) && res->sata_port)
5409 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5411 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5418 * ipr_bus_reset_done - Op done function for bus reset.
5419 * @ipr_cmd: ipr command struct
5421 * This function is the op done function for a bus reset
 * issued from ipr_abort_timeout.
 *
 * Return value:
 * nothing
5426 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5428 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5429 struct ipr_resource_entry *res;
/* SIS32 only: tell the midlayer which bus was reset */
5432 if (!ioa_cfg->sis64)
5433 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5434 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5435 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5441 * If abort has not completed, indicate the reset has, else call the
5442 * abort's done function to wake the sleeping eh thread
5444 if (ipr_cmd->sibling->sibling)
5445 ipr_cmd->sibling->sibling = NULL;
5447 ipr_cmd->sibling->done(ipr_cmd->sibling);
5449 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5454 * ipr_abort_timeout - An abort task has timed out
 * @t: timer embedded in the timed-out ipr command struct
5457 * This function handles when an abort task times out. If this
5458 * happens we issue a bus reset since we have resources tied
5459 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 * nothing
5464 static void ipr_abort_timeout(struct timer_list *t)
5466 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5467 struct ipr_cmnd *reset_cmd;
5468 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5469 struct ipr_cmd_pkt *cmd_pkt;
5470 unsigned long lock_flags = 0;
5473 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* bail if the abort already completed or an adapter reset is in flight */
5474 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5475 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5479 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5480 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
/* link abort and bus-reset cmds so ipr_bus_reset_done can wake the eh thread */
5481 ipr_cmd->sibling = reset_cmd;
5482 reset_cmd->sibling = ipr_cmd;
5483 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5484 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5485 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5486 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5487 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5489 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5490 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5495 * ipr_cancel_op - Cancel specified op
5496 * @scsi_cmd: scsi command struct
5498 * This function cancels specified op.
 *
 * Called with the host_lock held (see ipr_eh_abort).
 *
 * Return value:
 * SUCCESS / FAILED
5503 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5505 struct ipr_cmnd *ipr_cmd;
5506 struct ipr_ioa_cfg *ioa_cfg;
5507 struct ipr_resource_entry *res;
5508 struct ipr_cmd_pkt *cmd_pkt;
5510 int i, op_found = 0;
5511 struct ipr_hrr_queue *hrrq;
5514 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5515 res = scsi_cmd->device->hostdata;
5517 /* If we are currently going through reset/reload, return failed.
5518 * This will force the mid-layer to call ipr_eh_host_reset,
5519 * which will then go to sleep and wait for the reset to complete
 */
5521 if (ioa_cfg->in_reset_reload ||
5522 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5528 * If we are aborting a timed out op, chances are that the timeout was caused
5529 * by a still not detected EEH error. In such cases, reading a register will
5530 * trigger the EEH recovery infrastructure.
5532 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* cancel-all is only meaningful for generic SCSI resources */
5534 if (!ipr_is_gscsi(res))
/* confirm the op is still outstanding before issuing the cancel */
5537 for_each_hrrq(hrrq, ioa_cfg) {
5538 spin_lock(&hrrq->_lock);
5539 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5540 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5541 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5547 spin_unlock(&hrrq->_lock);
5553 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5554 ipr_cmd->ioarcb.res_handle = res->res_handle;
5555 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5556 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5557 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5558 ipr_cmd->u.sdev = scsi_cmd->device;
5560 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5562 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5563 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5566 * If the abort task timed out and we sent a bus reset, we will get
5567 * one the following responses to the abort
5569 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5574 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5575 if (!ipr_is_naca_model(res))
5576 res->needs_sync_complete = 1;
5579 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5583 * ipr_scan_finished - Report whether the async device scan has completed
5584 * @shost: scsi host being scanned
 * @elapsed_time: elapsed scan time, in jiffies
 *
 * Return value:
5587 * 0 if scan in progress / 1 if scan is complete
5589 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5591 unsigned long lock_flags;
5592 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5595 spin_lock_irqsave(shost->host_lock, lock_flags);
5596 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
/* give up once twice the transport-operational timeout has elapsed */
5598 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5600 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5605 * ipr_eh_abort - Abort a single op (scsi_host_template eh hook)
5606 * @scsi_cmd: scsi command struct
 *
 * Return value:
 * SUCCESS / FAILED
5611 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5613 unsigned long flags;
5615 struct ipr_ioa_cfg *ioa_cfg;
5619 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5621 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5622 rc = ipr_cancel_op(scsi_cmd);
5623 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
/* after a successful cancel, wait for all ops on this LUN to drain */
5626 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5632 * ipr_handle_other_interrupt - Handle "other" interrupts
5633 * @ioa_cfg: ioa config struct
5634 * @int_reg: interrupt register
 *
 * Return value:
5637 * IRQ_NONE / IRQ_HANDLED
5639 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5642 irqreturn_t rc = IRQ_HANDLED;
5645 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5646 int_reg &= ~int_mask_reg;
5648 /* If an interrupt on the adapter did not occur, ignore it.
5649 * Or in the case of SIS 64, check for a stage change interrupt.
 */
5651 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5652 if (ioa_cfg->sis64) {
5653 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5654 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5655 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5657 /* clear stage change */
5658 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5659 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
/* kick the reset job along to its next stage */
5660 list_del(&ioa_cfg->reset_cmd->queue);
5661 del_timer(&ioa_cfg->reset_cmd->timer);
5662 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
/* adapter transitioned to operational: resume the reset job */
5670 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5671 /* Mask the interrupt */
5672 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5673 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5675 list_del(&ioa_cfg->reset_cmd->queue);
5676 del_timer(&ioa_cfg->reset_cmd->timer);
5677 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5678 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5679 if (ioa_cfg->clear_isr) {
5680 if (ipr_debug && printk_ratelimit())
5681 dev_err(&ioa_cfg->pdev->dev,
5682 "Spurious interrupt detected. 0x%08X\n", int_reg);
5683 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5684 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
/* anything else is an adapter error: capture dump state and reset the IOA */
5688 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5689 ioa_cfg->ioa_unit_checked = 1;
5690 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5691 dev_err(&ioa_cfg->pdev->dev,
5692 "No Host RRQ. 0x%08X\n", int_reg);
5694 dev_err(&ioa_cfg->pdev->dev,
5695 "Permanent IOA failure. 0x%08X\n", int_reg);
5697 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5698 ioa_cfg->sdt_state = GET_DUMP;
5700 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5701 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5708 * ipr_isr_eh - Interrupt service routine error handler
5709 * @ioa_cfg: ioa config struct
5710 * @msg: message to log
 * @number: value logged alongside @msg (e.g. offending response handle)
 *
 * Return value:
 * nothing
5715 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5717 ioa_cfg->errors_logged++;
5718 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5720 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5721 ioa_cfg->sdt_state = GET_DUMP;
5723 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/**
 * ipr_process_hrrq - Drain completed responses from one host RRQ
 * @hrr_queue: host request/response queue to drain
 * @budget: max responses to process (<= 0 means unbounded)
 * @doneq: list head collecting completed commands for the caller to finish
 *
 * Caller holds the hrrq lock.
 *
 * Return value:
 * number of responses processed
 */
5726 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5727 struct list_head *doneq)
5731 struct ipr_cmnd *ipr_cmd;
5732 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5735 /* If interrupts are disabled, ignore the interrupt */
5736 if (!hrr_queue->allow_interrupts)
/* consume entries while their toggle bit matches the queue's current pass */
5739 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5740 hrr_queue->toggle_bit) {
5742 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5743 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5744 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
/* a handle outside this queue's range means the adapter is confused: reset it */
5746 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5747 cmd_index < hrr_queue->min_cmd_id)) {
5749 "Invalid response handle from IOA: ",
5754 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5755 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5757 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5759 list_move_tail(&ipr_cmd->queue, doneq);
/* advance, wrapping and flipping the toggle bit at the end of the ring */
5761 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5762 hrr_queue->hrrq_curr++;
5764 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5765 hrr_queue->toggle_bit ^= 1u;
5768 if (budget > 0 && num_hrrq >= budget)
/**
 * ipr_iopoll - irq_poll (polled completion) callback for one hrrq
 * @iop: irq_poll struct embedded in the hrrq
 * @budget: max completions to process this poll
 *
 * Return value:
 * number of completions processed
 */
5775 static int ipr_iopoll(struct irq_poll *iop, int budget)
5777 struct ipr_ioa_cfg *ioa_cfg;
5778 struct ipr_hrr_queue *hrrq;
5779 struct ipr_cmnd *ipr_cmd, *temp;
5780 unsigned long hrrq_flags;
5784 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5785 ioa_cfg = hrrq->ioa_cfg;
5787 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5788 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
/* fewer than budget means the queue is drained: re-enable interrupt mode */
5790 if (completed_ops < budget)
5791 irq_poll_complete(iop);
5792 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* finish completions outside the hrrq lock */
5794 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5795 list_del(&ipr_cmd->queue);
5796 del_timer(&ipr_cmd->timer);
5797 ipr_cmd->fast_done(ipr_cmd);
5800 return completed_ops;
5804 * ipr_isr - Interrupt service routine
 * @irq: irq number
5806 * @devp: pointer to ioa config struct
 *
 * Return value:
5809 * IRQ_NONE / IRQ_HANDLED
5811 static irqreturn_t ipr_isr(int irq, void *devp)
5813 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5814 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5815 unsigned long hrrq_flags = 0;
5819 struct ipr_cmnd *ipr_cmd, *temp;
5820 irqreturn_t rc = IRQ_NONE;
5823 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5824 /* If interrupts are disabled, ignore the interrupt */
5825 if (!hrrq->allow_interrupts) {
5826 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* NOTE(review): surrounding loop construct is on lines elided from this extract */
5831 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5834 if (!ioa_cfg->clear_isr)
5837 /* Clear the PCI interrupt */
5840 writel(IPR_PCII_HRRQ_UPDATED,
5841 ioa_cfg->regs.clr_interrupt_reg32);
5842 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
/* bounded retry: re-read while the HRRQ-updated bit keeps reasserting */
5843 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5844 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5846 } else if (rc == IRQ_NONE && irq_none == 0) {
5847 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5849 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5850 int_reg & IPR_PCII_HRRQ_UPDATED) {
5852 "Error clearing HRRQ: ", num_hrrq);
/* anything unhandled is delegated to the slow-path interrupt handler */
5859 if (unlikely(rc == IRQ_NONE))
5860 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5862 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* finish completions outside the hrrq lock */
5863 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5864 list_del(&ipr_cmd->queue);
5865 del_timer(&ipr_cmd->timer);
5866 ipr_cmd->fast_done(ipr_cmd);
5872 * ipr_isr_mhrrq - Interrupt service routine for secondary (MSI-X) hrrqs
 * @irq: irq number
5874 * @devp: pointer to ioa config struct
 *
 * Return value:
5877 * IRQ_NONE / IRQ_HANDLED
5879 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5881 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5882 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5883 unsigned long hrrq_flags = 0;
5884 struct ipr_cmnd *ipr_cmd, *temp;
5885 irqreturn_t rc = IRQ_NONE;
5888 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5890 /* If interrupts are disabled, ignore the interrupt */
5891 if (!hrrq->allow_interrupts) {
5892 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* with iopoll enabled, just schedule polled processing and return */
5896 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5897 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5899 irq_poll_sched(&hrrq->iopoll);
5900 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* otherwise drain the queue inline when the toggle bit shows new work */
5904 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5907 if (ipr_process_hrrq(hrrq, -1, &doneq))
5911 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* finish completions outside the hrrq lock */
5913 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5914 list_del(&ipr_cmd->queue);
5915 del_timer(&ipr_cmd->timer);
5916 ipr_cmd->fast_done(ipr_cmd);
5922 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5923 * @ioa_cfg: ioa config struct
5924 * @ipr_cmd: ipr command struct
5927 * 0 on success / -1 on failure
/*
 * DMA-map the SCSI command's buffer and build the 64-bit IOADL
 * (scatter/gather descriptor list) in the command block.
 * Returns 0 on success / -1 on failure (per the kernel-doc above).
 */
5929 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5930 struct ipr_cmnd *ipr_cmd)
5933 struct scatterlist *sg;
5935 u32 ioadl_flags = 0;
5936 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5937 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5938 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5940 length = scsi_bufflen(scsi_cmd);
5944 nseg = scsi_dma_map(scsi_cmd);
/* Rate-limited: scsi_dma_map failures can come in bursts. */
5946 if (printk_ratelimit())
5947 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5951 ipr_cmd->dma_use_sg = nseg;
5953 ioarcb->data_transfer_length = cpu_to_be32(length);
5955 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
/* Direction decides the descriptor flags and the write-not-read hint. */
5957 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5958 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5959 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5960 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5961 ioadl_flags = IPR_IOADL_FLAGS_READ;
5963 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5964 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5965 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5966 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
/* The adapter needs the final descriptor tagged as LAST. */
5969 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5974 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5975 * @ioa_cfg: ioa config struct
5976 * @ipr_cmd: ipr command struct
5979 * 0 on success / -1 on failure
/*
 * 32-bit counterpart of ipr_build_ioadl64: DMA-map the buffer and build
 * the legacy IOADL. Small S/G lists are embedded directly in the IOARCB
 * to save a fetch by the adapter.
 * Returns 0 on success / -1 on failure (per the kernel-doc above).
 */
5981 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5982 struct ipr_cmnd *ipr_cmd)
5985 struct scatterlist *sg;
5987 u32 ioadl_flags = 0;
5988 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5989 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5990 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5992 length = scsi_bufflen(scsi_cmd);
5996 nseg = scsi_dma_map(scsi_cmd);
5998 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
6002 ipr_cmd->dma_use_sg = nseg;
/* Unlike the 64-bit path, reads and writes use separate length/len
 * fields in the legacy IOARCB. */
6004 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
6005 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6006 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6007 ioarcb->data_transfer_length = cpu_to_be32(length);
6009 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6010 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6011 ioadl_flags = IPR_IOADL_FLAGS_READ;
6012 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6013 ioarcb->read_ioadl_len =
6014 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* If the list fits in the IOARCB's inline area, point both the read
 * and write IOADL addresses at it. */
6017 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6018 ioadl = ioarcb->u.add_data.u.ioadl;
6019 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6020 offsetof(struct ipr_ioarcb, u.add_data));
6021 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6024 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6025 ioadl[i].flags_and_data_len =
6026 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6027 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
/* Tag the final descriptor. */
6030 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6035 * __ipr_erp_done - Process completion of ERP for a device
6036 * @ipr_cmd: ipr command struct
6038 * This function copies the sense buffer into the scsi_cmd
6039 * struct and pushes the scsi_done function.
/*
 * ERP completion (lock already held by caller): copy the gathered sense
 * data into the scsi_cmnd, finish the command via scsi_done, and return
 * the ipr_cmnd to the free queue.
 */
6044 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6046 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6047 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6048 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* A non-zero sense key here means the REQUEST SENSE itself failed. */
6050 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6051 scsi_cmd->result |= (DID_ERROR << 16);
6052 scmd_printk(KERN_ERR, scsi_cmd,
6053 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6055 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6056 SCSI_SENSE_BUFFERSIZE);
6060 if (!ipr_is_naca_model(res))
6061 res->needs_sync_complete = 1;
6064 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6065 scsi_cmd->scsi_done(scsi_cmd);
/* Wake any error-handler thread waiting for this command. */
6066 if (ipr_cmd->eh_comp)
6067 complete(ipr_cmd->eh_comp);
6068 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6072 * ipr_erp_done - Process completion of ERP for a device
6073 * @ipr_cmd: ipr command struct
6075 * This function copies the sense buffer into the scsi_cmd
6076 * struct and pushes the scsi_done function.
6081 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6083 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6084 unsigned long hrrq_flags;
6086 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6087 __ipr_erp_done(ipr_cmd);
6088 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6092 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6093 * @ipr_cmd: ipr command struct
/*
 * Re-initialize a command block so it can be reused to issue an ERP op:
 * zero the command packet and transfer lengths, clear the IOASA status,
 * and re-point the IOADL address at the command's own descriptor area.
 */
6098 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6100 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6101 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6102 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6104 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6105 ioarcb->data_transfer_length = 0;
6106 ioarcb->read_data_transfer_length = 0;
6107 ioarcb->ioadl_len = 0;
6108 ioarcb->read_ioadl_len = 0;
6109 ioasa->hdr.ioasc = 0;
6110 ioasa->hdr.residual_data_len = 0;
/* sis64 adapters use a 64-bit IOADL address; legacy ones use the
 * separate 32-bit read/write address fields. */
6112 if (ipr_cmd->ioa_cfg->sis64)
6113 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6114 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6116 ioarcb->write_ioadl_addr =
6117 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6118 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6123 * __ipr_erp_request_sense - Send request sense to a device
6124 * @ipr_cmd: ipr command struct
6126 * This function sends a request sense to a device as a result
6127 * of a check condition.
/*
 * Issue a REQUEST SENSE to the device as part of ERP (lock held by
 * caller). If the previous ERP step already produced sense data, just
 * finish the command instead.
 */
6132 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6134 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6135 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6137 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6138 __ipr_erp_done(ipr_cmd);
6142 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Build the REQUEST SENSE CDB by hand. */
6144 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6145 cmd_pkt->cdb[0] = REQUEST_SENSE;
6146 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6147 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6148 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6149 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6151 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6152 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
/* Driver-side timer gets twice the adapter timeout as slack. */
6154 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6155 IPR_REQUEST_SENSE_TIMEOUT * 2);
6159 * ipr_erp_request_sense - Send request sense to a device
6160 * @ipr_cmd: ipr command struct
6162 * This function sends a request sense to a device as a result
6163 * of a check condition.
6168 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6170 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6171 unsigned long hrrq_flags;
6173 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6174 __ipr_erp_request_sense(ipr_cmd);
6175 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6179 * ipr_erp_cancel_all - Send cancel all to a device
6180 * @ipr_cmd: ipr command struct
6182 * This function sends a cancel all to a device to clear the
6183 * queue. If we are running TCQ on the device, QERR is set to 1,
6184 * which means all outstanding ops have been dropped on the floor.
6185 * Cancel all will return them to us.
/*
 * First ERP step: send CANCEL ALL REQUESTS to the device to recover ops
 * dropped by QERR (see kernel-doc above). Devices without simple tags
 * skip straight to the request-sense step.
 */
6190 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6192 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6193 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6194 struct ipr_cmd_pkt *cmd_pkt;
6198 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6200 if (!scsi_cmd->device->simple_tags) {
6201 __ipr_erp_request_sense(ipr_cmd);
6205 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6206 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6207 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
/* On completion, chain into the request-sense ERP step. */
6209 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6210 IPR_CANCEL_ALL_TIMEOUT);
6214 * ipr_dump_ioasa - Dump contents of IOASA
6215 * @ioa_cfg: ioa config struct
6216 * @ipr_cmd: ipr command struct
6217 * @res: resource entry struct
6219 * This function is invoked by the interrupt handler when ops
6220 * fail. It will log the IOASA if appropriate. Only called
/*
 * Log a failed op's IOASA, subject to the adapter's log level and the
 * error table's per-error policy; dump the raw IOASA words at the end.
 */
6226 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6227 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6231 u32 ioasc, fd_ioasc;
6232 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6233 __be32 *ioasa_data = (__be32 *)ioasa;
6236 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6237 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6242 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
/* For bus resets prefer the failing-device IOASC when available. */
6245 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6246 error_index = ipr_get_error(fd_ioasc);
6248 error_index = ipr_get_error(ioasc);
6250 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6251 /* Don't log an error if the IOA already logged one */
6252 if (ioasa->hdr.ilid != 0)
6255 if (!ipr_is_gscsi(res))
6258 if (ipr_error_table[error_index].log_ioasa == 0)
6262 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
/* Clamp the dump length to the IOASA size for this adapter family. */
6264 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6265 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6266 data_len = sizeof(struct ipr_ioasa64);
6267 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6268 data_len = sizeof(struct ipr_ioasa);
6270 ipr_err("IOASA Dump:\n");
/* Four big-endian 32-bit words per output line. */
6272 for (i = 0; i < data_len / 4; i += 4) {
6273 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6274 be32_to_cpu(ioasa_data[i]),
6275 be32_to_cpu(ioasa_data[i+1]),
6276 be32_to_cpu(ioasa_data[i+2]),
6277 be32_to_cpu(ioasa_data[i+3]));
6282 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6284 * @ipr_cmd: ipr command struct
/*
 * Synthesize SCSI sense data in the scsi_cmnd's sense buffer from the
 * IOASA of a failed op. Volume sets with a 64-bit failing LBA get
 * descriptor-format sense (0x72); everything else gets fixed format
 * (0x70), with field-pointer and failing-LBA information where the
 * IOASC provides it.
 */
6289 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6292 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6293 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6294 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6295 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6297 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* Driver-internal IOASCs carry no device sense to translate. */
6299 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6302 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/* Descriptor format (0x72): needed when the failing LBA exceeds 32
 * bits on a volume-set device. */
6304 if (ipr_is_vset_device(res) &&
6305 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6306 ioasa->u.vset.failing_lba_hi != 0) {
6307 sense_buf[0] = 0x72;
6308 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6309 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6310 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6314 sense_buf[9] = 0x0A;
6315 sense_buf[10] = 0x80;
/* High 32 bits of the 64-bit failing LBA... */
6317 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6319 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6320 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6321 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6322 sense_buf[15] = failing_lba & 0x000000ff;
/* ...then the low 32 bits. */
6324 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6326 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6327 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6328 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6329 sense_buf[19] = failing_lba & 0x000000ff;
/* Fixed format (0x70) for everything else. */
6331 sense_buf[0] = 0x70;
6332 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6333 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6334 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6336 /* Illegal request */
6337 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6338 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6339 sense_buf[7] = 10; /* additional length */
6341 /* IOARCB was in error */
6342 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6343 sense_buf[15] = 0xC0;
6344 else /* Parameter data was invalid */
6345 sense_buf[15] = 0x80;
/* Field pointer: big-endian 16-bit value from ioasc_specific. */
6348 ((IPR_FIELD_POINTER_MASK &
6349 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6351 (IPR_FIELD_POINTER_MASK &
6352 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6354 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6355 if (ipr_is_vset_device(res))
6356 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6358 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6360 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6361 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6362 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6363 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6364 sense_buf[6] = failing_lba & 0x000000ff;
6367 sense_buf[7] = 6; /* additional length */
6373 * ipr_get_autosense - Copy autosense data to sense buffer
6374 * @ipr_cmd: ipr command struct
6376 * This function copies the autosense buffer to the buffer
6377 * in the scsi_cmd, if there is autosense available.
6380 * 1 if autosense was available / 0 if not
/*
 * Copy adapter-supplied autosense into the scsi_cmnd's sense buffer.
 * Returns 1 if autosense was available / 0 if not (per kernel-doc above).
 */
6382 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6384 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6385 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6387 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
/* sis64 and legacy adapters keep autosense in different IOASA layouts;
 * either way, clamp the copy to SCSI_SENSE_BUFFERSIZE. */
6390 if (ipr_cmd->ioa_cfg->sis64)
6391 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6392 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6393 SCSI_SENSE_BUFFERSIZE));
6395 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6396 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6397 SCSI_SENSE_BUFFERSIZE));
6402 * ipr_erp_start - Process an error response for a SCSI op
6403 * @ioa_cfg: ioa config struct
6404 * @ipr_cmd: ipr command struct
6406 * This function determines whether or not to initiate ERP
6407 * on the affected device.
/*
 * Decide, per masked IOASC, how to finish a failed SCSI op: retry,
 * report no-connect, start device-level ERP (cancel-all/request-sense),
 * or complete with an error result.
 */
6412 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6413 struct ipr_cmnd *ipr_cmd)
6415 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6416 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6417 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6418 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6421 __ipr_scsi_eh_done(ipr_cmd);
/* Non-gscsi devices get synthesized sense data from the IOASA. */
6425 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6426 ipr_gen_sense(ipr_cmd);
6428 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6430 switch (masked_ioasc) {
6431 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6432 if (ipr_is_naca_model(res))
6433 scsi_cmd->result |= (DID_ABORT << 16);
6435 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6437 case IPR_IOASC_IR_RESOURCE_HANDLE:
6438 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6439 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6441 case IPR_IOASC_HW_SEL_TIMEOUT:
6442 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6443 if (!ipr_is_naca_model(res))
6444 res->needs_sync_complete = 1;
6446 case IPR_IOASC_SYNC_REQUIRED:
6448 res->needs_sync_complete = 1;
6449 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6451 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6452 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6454 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6455 * so SCSI mid-layer and upper layers handle it accordingly.
6457 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6458 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6460 case IPR_IOASC_BUS_WAS_RESET:
6461 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6463 * Report the bus reset and ask for a retry. The device
6464 * will give CC/UA the next command.
6466 if (!res->resetting_device)
6467 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6468 scsi_cmd->result |= (DID_ERROR << 16);
6469 if (!ipr_is_naca_model(res))
6470 res->needs_sync_complete = 1;
6472 case IPR_IOASC_HW_DEV_BUS_STATUS:
/* Device reported SCSI status; on CHECK CONDITION with no
 * autosense, kick off the ERP sequence. */
6473 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6474 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6475 if (!ipr_get_autosense(ipr_cmd)) {
6476 if (!ipr_is_naca_model(res)) {
6477 ipr_erp_cancel_all(ipr_cmd);
6482 if (!ipr_is_naca_model(res))
6483 res->needs_sync_complete = 1;
6485 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6487 case IPR_IOASC_IR_NON_OPTIMIZED:
6488 if (res->raw_mode) {
6490 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6492 scsi_cmd->result |= (DID_ERROR << 16);
/* Default: anything worse than RECOVERED_ERROR is a real error. */
6495 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6496 scsi_cmd->result |= (DID_ERROR << 16);
6497 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6498 res->needs_sync_complete = 1;
/* Complete the command and return the block to the free queue. */
6502 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6503 scsi_cmd->scsi_done(scsi_cmd);
6504 if (ipr_cmd->eh_comp)
6505 complete(ipr_cmd->eh_comp);
6506 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6510 * ipr_scsi_done - mid-layer done function
6511 * @ipr_cmd: ipr command struct
6513 * This function is invoked by the interrupt handler for
6514 * ops generated by the SCSI mid-layer
/*
 * Completion callback for mid-layer ops: fast path completes successful
 * commands under the hrrq lock; failures are routed into ipr_erp_start()
 * under host_lock + hrrq _lock.
 */
6519 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6521 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6522 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6523 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6524 unsigned long lock_flags;
6526 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
/* Fast path: zero sense key == success. */
6528 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6529 scsi_dma_unmap(scsi_cmd);
6531 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6532 scsi_cmd->scsi_done(scsi_cmd);
6533 if (ipr_cmd->eh_comp)
6534 complete(ipr_cmd->eh_comp);
6535 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6536 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
/* Error path: ERP runs with both the host lock and the hrrq _lock. */
6538 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6539 spin_lock(&ipr_cmd->hrrq->_lock);
6540 ipr_erp_start(ioa_cfg, ipr_cmd);
6541 spin_unlock(&ipr_cmd->hrrq->_lock);
6542 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6547 * ipr_queuecommand - Queue a mid-layer request
6548 * @shost: scsi host struct
6549 * @scsi_cmd: scsi command struct
6551 * This function queues a request generated by the mid-layer.
6555 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6556 * SCSI_MLQUEUE_HOST_BUSY if host is busy
/*
 * scsi_host_template.queuecommand: pick an HRR queue, grab a free
 * command block, build the IOARCB and IOADL, and send the command.
 * GATA devices are diverted to libata via ata_sas_queuecmd().
 * Returns 0, SCSI_MLQUEUE_HOST_BUSY, or (not shown here) an ATA rc.
 */
6558 static int ipr_queuecommand(struct Scsi_Host *shost,
6559 struct scsi_cmnd *scsi_cmd)
6561 struct ipr_ioa_cfg *ioa_cfg;
6562 struct ipr_resource_entry *res;
6563 struct ipr_ioarcb *ioarcb;
6564 struct ipr_cmnd *ipr_cmd;
6565 unsigned long hrrq_flags, lock_flags;
6567 struct ipr_hrr_queue *hrrq;
6570 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6572 scsi_cmd->result = (DID_OK << 16);
6573 res = scsi_cmd->device->hostdata;
/* SATA devices go through libata, under the host lock. */
6575 if (ipr_is_gata(res) && res->sata_port) {
6576 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6577 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6578 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6582 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6583 hrrq = &ioa_cfg->hrrq[hrrq_id];
6585 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6587 * We are currently blocking all devices due to a host reset
6588 * We have told the host to stop giving us new requests, but
6589 * ERP ops don't count. FIXME
6591 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6592 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6593 return SCSI_MLQUEUE_HOST_BUSY;
6597 * FIXME - Create scsi_set_host_offline interface
6598 * and the ioa_is_dead check can be removed
6600 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6601 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6605 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6606 if (ipr_cmd == NULL) {
6607 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6608 return SCSI_MLQUEUE_HOST_BUSY;
6610 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Build the IOARCB from the mid-layer command. */
6612 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6613 ioarcb = &ipr_cmd->ioarcb;
6615 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6616 ipr_cmd->scsi_cmd = scsi_cmd;
6617 ipr_cmd->done = ipr_scsi_eh_done;
6619 if (ipr_is_gscsi(res)) {
6620 if (scsi_cmd->underflow == 0)
6621 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
/* First command after a reset gets a post-reset delay flag. */
6623 if (res->reset_occurred) {
6624 res->reset_occurred = 0;
6625 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6629 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6630 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6632 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6633 if (scsi_cmd->flags & SCMD_TAGGED)
6634 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6636 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
/* Vendor-unique CDB opcodes (>= 0xC0) are IOA commands. */
6639 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6640 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6641 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6643 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6644 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6646 if (scsi_cmd->underflow == 0)
6647 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6651 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6653 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
/* Re-take the lock and re-check state before actually sending. */
6655 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6656 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6657 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6658 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6660 scsi_dma_unmap(scsi_cmd);
6661 return SCSI_MLQUEUE_HOST_BUSY;
6664 if (unlikely(hrrq->ioa_is_dead)) {
6665 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6666 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6667 scsi_dma_unmap(scsi_cmd);
6671 ioarcb->res_handle = res->res_handle;
6672 if (res->needs_sync_complete) {
6673 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6674 res->needs_sync_complete = 0;
6676 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6677 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6678 ipr_send_command(ipr_cmd);
6679 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Dead-IOA / no-resource path: fail the command with DID_NO_CONNECT. */
6683 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6684 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6685 scsi_cmd->result = (DID_NO_CONNECT << 16);
6686 scsi_cmd->scsi_done(scsi_cmd);
6687 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6692 * ipr_ioctl - IOCTL handler
6693 * @sdev: scsi device struct
6698 * 0 on success / other on failure
/*
 * sdev ioctl handler: forwards ioctls for GATA devices to libata's
 * SCSI ioctl translation (HDIO_GET_IDENTITY handling is elided here).
 */
6700 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6702 struct ipr_resource_entry *res;
6704 res = (struct ipr_resource_entry *)sdev->hostdata;
6705 if (res && ipr_is_gata(res)) {
6706 if (cmd == HDIO_GET_IDENTITY)
6708 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6715 * ipr_ioa_info - Get information about the card/driver
6716 * @host: scsi host struct
6719 * pointer to buffer with description string
6721 static const char *ipr_ioa_info(struct Scsi_Host *host)
6723 static char buffer[512];
6724 struct ipr_ioa_cfg *ioa_cfg;
6725 unsigned long lock_flags = 0;
6727 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6729 spin_lock_irqsave(host->host_lock, lock_flags);
6730 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6731 spin_unlock_irqrestore(host->host_lock, lock_flags);
/*
 * SCSI mid-layer host template: wires this driver's queuecommand,
 * error-handler, slave/target lifecycle, and sysfs attribute hooks,
 * and sets the queue-depth / S/G / transfer-size limits.
 */
6736 static struct scsi_host_template driver_template = {
6737 .module = THIS_MODULE,
6739 .info = ipr_ioa_info,
6741 .queuecommand = ipr_queuecommand,
6742 .eh_abort_handler = ipr_eh_abort,
6743 .eh_device_reset_handler = ipr_eh_dev_reset,
6744 .eh_host_reset_handler = ipr_eh_host_reset,
6745 .slave_alloc = ipr_slave_alloc,
6746 .slave_configure = ipr_slave_configure,
6747 .slave_destroy = ipr_slave_destroy,
6748 .scan_finished = ipr_scan_finished,
6749 .target_alloc = ipr_target_alloc,
6750 .target_destroy = ipr_target_destroy,
6751 .change_queue_depth = ipr_change_queue_depth,
6752 .bios_param = ipr_biosparam,
6753 .can_queue = IPR_MAX_COMMANDS,
6755 .sg_tablesize = IPR_MAX_SGLIST,
6756 .max_sectors = IPR_IOA_MAX_SECTORS,
6757 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6758 .use_clustering = ENABLE_CLUSTERING,
6759 .shost_attrs = ipr_ioa_attrs,
6760 .sdev_attrs = ipr_dev_attrs,
6761 .proc_name = IPR_NAME,
6765 * ipr_ata_phy_reset - libata phy_reset handler
6766 * @ap: ata port to reset
/*
 * libata phy_reset hook: wait out any in-progress adapter reset, then
 * reset the SATA device and publish its ATA class on the link.
 */
6769 static void ipr_ata_phy_reset(struct ata_port *ap)
6771 unsigned long flags;
6772 struct ipr_sata_port *sata_port = ap->private_data;
6773 struct ipr_resource_entry *res = sata_port->res;
6774 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
/* Sleep (lock dropped) until any reset/reload completes. */
6778 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6779 while (ioa_cfg->in_reset_reload) {
6780 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6781 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6782 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6785 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6788 rc = ipr_device_reset(ioa_cfg, res);
/* On failure mark the device absent; otherwise report its class. */
6791 ap->link.device[0].class = ATA_DEV_NONE;
6795 ap->link.device[0].class = res->ata_class;
6796 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6797 ap->link.device[0].class = ATA_DEV_NONE;
6800 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6805 * ipr_ata_post_internal - Cleanup after an internal command
6806 * @qc: ATA queued command
/*
 * libata post_internal hook: after an internal command, if it is still
 * pending on any HRR queue, reset the device to flush it out.
 */
6811 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6813 struct ipr_sata_port *sata_port = qc->ap->private_data;
6814 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6815 struct ipr_cmnd *ipr_cmd;
6816 struct ipr_hrr_queue *hrrq;
6817 unsigned long flags;
/* Sleep (lock dropped) until any reset/reload completes. */
6819 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6820 while (ioa_cfg->in_reset_reload) {
6821 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6822 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6823 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* Scan every queue's pending list for this qc. */
6826 for_each_hrrq(hrrq, ioa_cfg) {
6827 spin_lock(&hrrq->_lock);
6828 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6829 if (ipr_cmd->qc == qc) {
6830 ipr_device_reset(ioa_cfg, sata_port->res);
6834 spin_unlock(&hrrq->_lock);
6836 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6840 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6841 * @regs: destination
6842 * @tf: source ATA taskfile
6847 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6848 struct ata_taskfile *tf)
6850 regs->feature = tf->feature;
6851 regs->nsect = tf->nsect;
6852 regs->lbal = tf->lbal;
6853 regs->lbam = tf->lbam;
6854 regs->lbah = tf->lbah;
6855 regs->device = tf->device;
6856 regs->command = tf->command;
6857 regs->hob_feature = tf->hob_feature;
6858 regs->hob_nsect = tf->hob_nsect;
6859 regs->hob_lbal = tf->hob_lbal;
6860 regs->hob_lbam = tf->hob_lbam;
6861 regs->hob_lbah = tf->hob_lbah;
6862 regs->ctl = tf->ctl;
6866 * ipr_sata_done - done function for SATA commands
6867 * @ipr_cmd: ipr command struct
6869 * This function is invoked by the interrupt handler for
6870 * ops generated by the SCSI mid-layer to SATA devices
/*
 * Completion callback for SATA ops: snapshot the GATA IOASA into the
 * sata_port, translate the IOASC into a libata error mask, and complete
 * the qc. The qc is completed after dropping the hrrq _lock.
 */
6875 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6877 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6878 struct ata_queued_cmd *qc = ipr_cmd->qc;
6879 struct ipr_sata_port *sata_port = qc->ap->private_data;
6880 struct ipr_resource_entry *res = sata_port->res;
6881 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6883 spin_lock(&ipr_cmd->hrrq->_lock);
/* sis64 and legacy adapters keep the GATA status in different layouts. */
6884 if (ipr_cmd->ioa_cfg->sis64)
6885 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6886 sizeof(struct ipr_ioasa_gata));
6888 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6889 sizeof(struct ipr_ioasa_gata));
6890 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6892 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6893 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
/* Anything worse than RECOVERED_ERROR forces an error mask. */
6895 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6896 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6898 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6899 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6900 spin_unlock(&ipr_cmd->hrrq->_lock);
6901 ata_qc_complete(qc);
6905 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6906 * @ipr_cmd: ipr command struct
6907 * @qc: ATA queued command
/*
 * Build the 64-bit IOADL for an ATA qc from its (already DMA-mapped)
 * scatterlist, walking qc->sg and tagging the final descriptor LAST.
 */
6910 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6911 struct ata_queued_cmd *qc)
6913 u32 ioadl_flags = 0;
6914 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6915 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6916 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6917 int len = qc->nbytes;
6918 struct scatterlist *sg;
6920 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6925 if (qc->dma_dir == DMA_TO_DEVICE) {
6926 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6927 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6928 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6929 ioadl_flags = IPR_IOADL_FLAGS_READ;
6931 ioarcb->data_transfer_length = cpu_to_be32(len);
6933 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
/* The ATA IOADL lives in the command's ata_ioadl area. */
6934 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6935 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6937 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6938 ioadl64->flags = cpu_to_be32(ioadl_flags);
6939 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6940 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6942 last_ioadl64 = ioadl64;
/* Guarded: last_ioadl64 is NULL for a zero-element S/G list. */
6946 if (likely(last_ioadl64))
6947 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6951 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6952 * @ipr_cmd: ipr command struct
6953 * @qc: ATA queued command
/*
 * Legacy 32-bit counterpart of ipr_build_ata_ioadl64: build the ATA
 * IOADL with separate read/write length fields in the IOARCB.
 */
6956 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6957 struct ata_queued_cmd *qc)
6959 u32 ioadl_flags = 0;
6960 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6961 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6962 struct ipr_ioadl_desc *last_ioadl = NULL;
6963 int len = qc->nbytes;
6964 struct scatterlist *sg;
6970 if (qc->dma_dir == DMA_TO_DEVICE) {
6971 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6972 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6973 ioarcb->data_transfer_length = cpu_to_be32(len);
6975 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6976 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6977 ioadl_flags = IPR_IOADL_FLAGS_READ;
6978 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6979 ioarcb->read_ioadl_len =
6980 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6983 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6984 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6985 ioadl->address = cpu_to_be32(sg_dma_address(sg));
/* Guarded: last_ioadl is NULL for a zero-element S/G list. */
6991 if (likely(last_ioadl))
6992 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6996 * ipr_qc_defer - Get a free ipr_cmd
6997 * @qc: queued command
/*
 * libata qc_defer hook: reserve an ipr_cmnd for the qc (stashed in
 * qc->lldd_task) or ask libata to defer the command when the adapter
 * cannot take it.
 */
7002 static int ipr_qc_defer(struct ata_queued_cmd *qc)
7004 struct ata_port *ap = qc->ap;
7005 struct ipr_sata_port *sata_port = ap->private_data;
7006 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7007 struct ipr_cmnd *ipr_cmd;
7008 struct ipr_hrr_queue *hrrq;
7011 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7012 hrrq = &ioa_cfg->hrrq[hrrq_id];
7014 qc->lldd_task = NULL;
7015 spin_lock(&hrrq->_lock);
7016 if (unlikely(hrrq->ioa_is_dead)) {
7017 spin_unlock(&hrrq->_lock);
7021 if (unlikely(!hrrq->allow_cmds)) {
7022 spin_unlock(&hrrq->_lock);
7023 return ATA_DEFER_LINK;
/* No free command block: defer rather than fail. */
7026 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7027 if (ipr_cmd == NULL) {
7028 spin_unlock(&hrrq->_lock);
7029 return ATA_DEFER_LINK;
7032 qc->lldd_task = ipr_cmd;
7033 spin_unlock(&hrrq->_lock);
7038 * ipr_qc_issue - Issue a SATA qc to a device
7039 * @qc: queued command
/*
 * libata qc_issue hook: take the ipr_cmnd reserved by ipr_qc_defer(),
 * build an ATA-passthrough IOARCB (taskfile regs + IOADL), and send it.
 * Returns 0 on success or an AC_ERR_* code.
 */
7044 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7046 struct ata_port *ap = qc->ap;
7047 struct ipr_sata_port *sata_port = ap->private_data;
7048 struct ipr_resource_entry *res = sata_port->res;
7049 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7050 struct ipr_cmnd *ipr_cmd;
7051 struct ipr_ioarcb *ioarcb;
7052 struct ipr_ioarcb_ata_regs *regs;
/* The command block must have been reserved in ipr_qc_defer(). */
7054 if (qc->lldd_task == NULL)
7057 ipr_cmd = qc->lldd_task;
7058 if (ipr_cmd == NULL)
7059 return AC_ERR_SYSTEM;
7061 qc->lldd_task = NULL;
7062 spin_lock(&ipr_cmd->hrrq->_lock);
/* Re-check adapter state now that we hold the queue lock. */
7063 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7064 ipr_cmd->hrrq->ioa_is_dead)) {
7065 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7066 spin_unlock(&ipr_cmd->hrrq->_lock);
7067 return AC_ERR_SYSTEM;
7070 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7071 ioarcb = &ipr_cmd->ioarcb;
/* The ATA register block lives in different places on sis64. */
7073 if (ioa_cfg->sis64) {
7074 regs = &ipr_cmd->i.ata_ioadl.regs;
7075 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7077 regs = &ioarcb->u.add_data.u.regs;
7079 memset(regs, 0, sizeof(*regs));
7080 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7082 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7084 ipr_cmd->done = ipr_sata_done;
7085 ipr_cmd->ioarcb.res_handle = res->res_handle;
7086 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7087 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7088 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7089 ipr_cmd->dma_use_sg = qc->n_elem;
7092 ipr_build_ata_ioadl64(ipr_cmd, qc);
7094 ipr_build_ata_ioadl(ipr_cmd, qc);
7096 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7097 ipr_copy_sata_tf(regs, &qc->tf);
7098 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7099 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
/* Map the ATA protocol onto the adapter's transfer-type flags. */
7101 switch (qc->tf.protocol) {
7102 case ATA_PROT_NODATA:
7107 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7110 case ATAPI_PROT_PIO:
7111 case ATAPI_PROT_NODATA:
7112 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7115 case ATAPI_PROT_DMA:
7116 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7117 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
/* Unsupported protocol: bail out before sending. */
7122 spin_unlock(&ipr_cmd->hrrq->_lock);
7123 return AC_ERR_INVALID;
7126 ipr_send_command(ipr_cmd);
7127 spin_unlock(&ipr_cmd->hrrq->_lock);
7133 * ipr_qc_fill_rtf - Read result TF
7134 * @qc: ATA queued command
7139 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7141 struct ipr_sata_port *sata_port = qc->ap->private_data;
7142 struct ipr_ioasa_gata *g = &sata_port->ioasa;
7143 struct ata_taskfile *tf = &qc->result_tf;
/* Copy the ATA status registers returned by the adapter (cached in the
 * port's ioasa_gata) into libata's result taskfile. */
7145 tf->feature = g->error;
7146 tf->nsect = g->nsect;
7150 tf->device = g->device;
7151 tf->command = g->status;
7152 tf->hob_nsect = g->hob_nsect;
7153 tf->hob_lbal = g->hob_lbal;
7154 tf->hob_lbam = g->hob_lbam;
7155 tf->hob_lbah = g->hob_lbah;
/* libata port operations for SATA devices reached through the IOA. */
7160 static struct ata_port_operations ipr_sata_ops = {
7161 .phy_reset = ipr_ata_phy_reset,
7162 .hardreset = ipr_sata_reset,
7163 .post_internal_cmd = ipr_ata_post_internal,
7164 .qc_prep = ata_noop_qc_prep,
7165 .qc_defer = ipr_qc_defer,
7166 .qc_issue = ipr_qc_issue,
7167 .qc_fill_rtf = ipr_qc_fill_rtf,
7168 .port_start = ata_sas_port_start,
7169 .port_stop = ata_sas_port_stop
/* Default port capabilities advertised to libata (PIO4, MWDMA2, UDMA6). */
7172 static struct ata_port_info sata_port_info = {
7173 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7175 .pio_mask = ATA_PIO4_ONLY,
7176 .mwdma_mask = ATA_MWDMA2,
7177 .udma_mask = ATA_UDMA6,
7178 .port_ops = &ipr_sata_ops
7181 #ifdef CONFIG_PPC_PSERIES
7182 static const u16 ipr_blocked_processors[] = {
7194 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7195 * @ioa_cfg: ioa cfg struct
7197 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7198 * certain pSeries hardware. This function determines if the given
7199 * adapter is in one of these configurations or not.
7202 * 1 if adapter is not supported / 0 if adapter is supported
7204 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
/* Gemstone (type 0x5702) adapters before PCI revision 4 are blocked on
 * the processors listed in ipr_blocked_processors[]. */
7208 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7209 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7210 if (pvr_version_is(ipr_blocked_processors[i]))
/* On non-pSeries builds every adapter is considered supported. */
7217 #define ipr_invalid_adapter(ioa_cfg) 0
7221 * ipr_ioa_bringdown_done - IOA bring down completion.
7222 * @ipr_cmd: ipr command struct
7224 * This function processes the completion of an adapter bring down.
7225 * It wakes any reset sleepers.
7230 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7232 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* If the adapter is not being removed, unblock the SCSI midlayer again. */
7236 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7238 ioa_cfg->scsi_unblock = 1;
7239 schedule_work(&ioa_cfg->work_q);
/* Mark every HRRQ dead under its own lock so no new commands are accepted. */
7242 ioa_cfg->in_reset_reload = 0;
7243 ioa_cfg->reset_retries = 0;
7244 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7245 spin_lock(&ioa_cfg->hrrq[i]._lock);
7246 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7247 spin_unlock(&ioa_cfg->hrrq[i]._lock);
/* Return the command block and wake anyone sleeping on the reset. */
7251 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7252 wake_up_all(&ioa_cfg->reset_wait_q);
7255 return IPR_RC_JOB_RETURN;
7259 * ipr_ioa_reset_done - IOA reset completion.
7260 * @ipr_cmd: ipr command struct
7262 * This function processes the completion of an adapter reset.
7263 * It schedules any necessary mid-layer add/removes and
7264 * wakes any reset sleepers.
7269 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7271 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7272 struct ipr_resource_entry *res;
/* Reset finished: re-enable command submission on all HRRQs. */
7276 ioa_cfg->in_reset_reload = 0;
7277 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7278 spin_lock(&ioa_cfg->hrrq[j]._lock);
7279 ioa_cfg->hrrq[j].allow_cmds = 1;
7280 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7283 ioa_cfg->reset_cmd = NULL;
7284 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
/* Schedule midlayer add/remove work for any device that changed state. */
7286 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7287 if (res->add_to_ml || res->del_from_ml) {
7292 schedule_work(&ioa_cfg->work_q);
/* Re-arm the HCAMs: the first IPR_NUM_LOG_HCAMS carry error log data,
 * the remainder carry configuration-change notifications. */
7294 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7295 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7296 if (j < IPR_NUM_LOG_HCAMS)
7297 ipr_send_hcam(ioa_cfg,
7298 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7299 ioa_cfg->hostrcb[j]);
7301 ipr_send_hcam(ioa_cfg,
7302 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7303 ioa_cfg->hostrcb[j]);
7306 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7307 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
/* Free the command, wake reset sleepers, and unblock the SCSI host. */
7309 ioa_cfg->reset_retries = 0;
7310 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7311 wake_up_all(&ioa_cfg->reset_wait_q);
7313 ioa_cfg->scsi_unblock = 1;
7314 schedule_work(&ioa_cfg->work_q);
7316 return IPR_RC_JOB_RETURN;
7320 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7321 * @supported_dev: supported device struct
7322 * @vpids: vendor product id struct
7327 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7328 struct ipr_std_inq_vpids *vpids)
/* Build a single-record Set Supported Device buffer for the given
 * vendor/product ID pair. */
7330 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7331 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7332 supported_dev->num_records = 1;
7333 supported_dev->data_length =
7334 cpu_to_be16(sizeof(struct ipr_supported_device));
7335 supported_dev->reserved = 0;
7339 * ipr_set_supported_devs - Send Set Supported Devices for a device
7340 * @ipr_cmd: ipr command struct
7342 * This function sends a Set Supported Devices to the adapter
7345 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7347 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7349 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7350 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7351 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7352 struct ipr_resource_entry *res = ipr_cmd->u.res;
/* Default next step; overridden below while there are more disks to do. */
7354 ipr_cmd->job_step = ipr_ioa_reset_done;
/* Resume the walk of used_res_q from the resource saved in ipr_cmd->u.res,
 * sending one Set Supported Devices per SCSI disk. */
7356 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7357 if (!ipr_is_scsi_disk(res))
7360 ipr_cmd->u.res = res;
7361 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7363 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7364 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7365 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7367 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7368 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
/* CDB bytes 7/8 carry the transfer length, big-endian. */
7369 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7370 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7372 ipr_init_ioadl(ipr_cmd,
7373 ioa_cfg->vpd_cbs_dma +
7374 offsetof(struct ipr_misc_cbs, supp_dev),
7375 sizeof(struct ipr_supported_device),
7376 IPR_IOADL_FLAGS_WRITE_LAST);
7378 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7379 IPR_SET_SUP_DEVICE_TIMEOUT);
/* NOTE(review): on non-SIS64 adapters this step re-runs for the next disk. */
7381 if (!ioa_cfg->sis64)
7382 ipr_cmd->job_step = ipr_set_supported_devs;
7384 return IPR_RC_JOB_RETURN;
7388 return IPR_RC_JOB_CONTINUE;
7392 * ipr_get_mode_page - Locate specified mode page
7393 * @mode_pages: mode page buffer
7394 * @page_code: page code to find
7395 * @len: minimum required length for mode page
7398 * pointer to mode page / NULL on failure
7400 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7401 u32 page_code, u32 len)
7403 struct ipr_mode_page_hdr *mode_hdr;
7407 if (!mode_pages || (mode_pages->hdr.length == 0))
/* hdr.length excludes itself (+1) and the fixed 4-byte mode parameter
 * header; skip the block descriptors to reach the first page header. */
7410 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7411 mode_hdr = (struct ipr_mode_page_hdr *)
7412 (mode_pages->data + mode_pages->hdr.block_desc_len);
/* Walk the page headers until the requested page_code is found and its
 * page_length satisfies the caller's minimum len. */
7415 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7416 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7420 page_length = (sizeof(struct ipr_mode_page_hdr) +
7421 mode_hdr->page_length);
7422 length -= page_length;
7423 mode_hdr = (struct ipr_mode_page_hdr *)
7424 ((unsigned long)mode_hdr + page_length);
7431 * ipr_check_term_power - Check for term power errors
7432 * @ioa_cfg: ioa config struct
7433 * @mode_pages: IOAFP mode pages buffer
7435 * Check the IOAFP's mode page 28 for term power errors
7440 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7441 struct ipr_mode_pages *mode_pages)
7445 struct ipr_dev_bus_entry *bus;
7446 struct ipr_mode_page28 *mode_page;
7448 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7449 sizeof(struct ipr_mode_page28));
7451 entry_length = mode_page->entry_length;
7453 bus = mode_page->bus;
/* Scan each device bus entry and log buses reporting missing term power. */
7455 for (i = 0; i < mode_page->num_entries; i++) {
7456 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7457 dev_err(&ioa_cfg->pdev->dev,
7458 "Term power is absent on scsi bus %d\n",
/* Entries are variable-sized; advance by the reported entry_length. */
7462 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7467 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7468 * @ioa_cfg: ioa config struct
7470 * Looks through the config table checking for SES devices. If
7471 * the SES device is in the SES table indicating a maximum SCSI
7472 * bus speed, the speed is limited for the bus.
7477 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
/* Clamp each bus's max transfer rate to the SES-table limit, if lower. */
7482 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7483 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7484 ioa_cfg->bus_attr[i].bus_width);
7486 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7487 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7492 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7493 * @ioa_cfg: ioa config struct
7494 * @mode_pages: mode page 28 buffer
7496 * Updates mode page 28 based on driver configuration
7501 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7502 struct ipr_mode_pages *mode_pages)
7504 int i, entry_length;
7505 struct ipr_dev_bus_entry *bus;
7506 struct ipr_bus_attributes *bus_attr;
7507 struct ipr_mode_page28 *mode_page;
7509 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7510 sizeof(struct ipr_mode_page28));
7512 entry_length = mode_page->entry_length;
7514 /* Loop for each device bus entry */
7515 for (i = 0, bus = mode_page->bus;
7516 i < mode_page->num_entries;
7517 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7518 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7519 dev_err(&ioa_cfg->pdev->dev,
7520 "Invalid resource address reported: 0x%08X\n",
7521 IPR_GET_PHYS_LOC(bus->res_addr));
/* Push the driver-configured attributes for this bus into the page. */
7525 bus_attr = &ioa_cfg->bus_attr[i];
7526 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7527 bus->bus_width = bus_attr->bus_width;
7528 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
/* Rewrite only the QAS bits, preserving the other flag bits. */
7529 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7530 if (bus_attr->qas_enabled)
7531 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7533 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7538 * ipr_build_mode_select - Build a mode select command
7539 * @ipr_cmd: ipr command struct
7540 * @res_handle: resource handle to send command to
7541 * @parm: Byte 2 of Mode Sense command
7542 * @dma_addr: DMA buffer address
7543 * @xfer_len: data transfer length
7548 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7549 __be32 res_handle, u8 parm,
7550 dma_addr_t dma_addr, u8 xfer_len)
7552 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
/* Fill in a 6-byte MODE SELECT CDB and attach the write data descriptor. */
7554 ioarcb->res_handle = res_handle;
7555 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7556 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7557 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7558 ioarcb->cmd_pkt.cdb[1] = parm;
7559 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7561 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7565 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7566 * @ipr_cmd: ipr command struct
7568 * This function sets up the SCSI bus attributes and sends
7569 * a Mode Select for Page 28 to activate them.
7574 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7576 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7577 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
/* Apply driver policy to the previously-sensed page 28 before writing
 * it back to the adapter. */
7581 ipr_scsi_bus_speed_limit(ioa_cfg);
7582 ipr_check_term_power(ioa_cfg, mode_pages);
7583 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
/* Length byte is not itself counted; it must be zeroed for MODE SELECT. */
7584 length = mode_pages->hdr.length + 1;
7585 mode_pages->hdr.length = 0;
7587 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7588 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
/* Next job step walks used_res_q sending Set Supported Devices. */
7591 ipr_cmd->job_step = ipr_set_supported_devs;
7592 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7593 struct ipr_resource_entry, queue);
7594 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7597 return IPR_RC_JOB_RETURN;
7601 * ipr_build_mode_sense - Builds a mode sense command
7602 * @ipr_cmd: ipr command struct
7603 * @res: resource entry struct
7604 * @parm: Byte 2 of mode sense command
7605 * @dma_addr: DMA address of mode sense buffer
7606 * @xfer_len: Size of DMA buffer
7611 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7613 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7615 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
/* Fill in a 6-byte MODE SENSE CDB and attach the read data descriptor. */
7617 ioarcb->res_handle = res_handle;
7618 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7619 ioarcb->cmd_pkt.cdb[2] = parm;
7620 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7621 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7623 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7627 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7628 * @ipr_cmd: ipr command struct
7630 * This function handles the failure of an IOA bringup command.
7635 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7638 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Log the failing opcode/IOASC, then restart the reset from scratch. */
7640 dev_err(&ioa_cfg->pdev->dev,
7641 "0x%02X failed with IOASC: 0x%08X\n",
7642 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7644 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7645 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7646 return IPR_RC_JOB_RETURN;
7650 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7651 * @ipr_cmd: ipr command struct
7653 * This function handles the failure of a Mode Sense to the IOAFP.
7654 * Some adapters do not handle all mode pages.
7657 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7659 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7661 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7662 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Adapters that do not implement this mode page return INVALID_REQ;
 * skip straight to Set Supported Devices instead of failing the reset. */
7664 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7665 ipr_cmd->job_step = ipr_set_supported_devs;
7666 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7667 struct ipr_resource_entry, queue);
7668 return IPR_RC_JOB_CONTINUE;
/* Any other IOASC is treated as a genuine reset-job failure. */
7671 return ipr_reset_cmd_failed(ipr_cmd);
7675 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7676 * @ipr_cmd: ipr command struct
7678 * This function send a Page 28 mode sense to the IOA to
7679 * retrieve SCSI bus attributes.
7684 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7686 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Sense page 0x28 (SCSI bus attributes) into the shared VPD buffer. */
7689 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7690 0x28, ioa_cfg->vpd_cbs_dma +
7691 offsetof(struct ipr_misc_cbs, mode_pages),
7692 sizeof(struct ipr_mode_pages));
/* On success continue with the page 28 mode select; a failure may be
 * a benign unsupported-page error handled by the failure hook. */
7694 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7695 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7697 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7700 return IPR_RC_JOB_RETURN;
7704 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7705 * @ipr_cmd: ipr command struct
7707 * This function enables dual IOA RAID support if possible.
7712 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7714 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7715 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7716 struct ipr_mode_page24 *mode_page;
7720 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7721 sizeof(struct ipr_mode_page24));
/* Turn on dual-IOA advanced function support in the sensed page. */
7724 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
/* Length byte is not itself counted; it must be zeroed for MODE SELECT. */
7726 length = mode_pages->hdr.length + 1;
7727 mode_pages->hdr.length = 0;
7729 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7730 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7733 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7734 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7737 return IPR_RC_JOB_RETURN;
7741 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7742 * @ipr_cmd: ipr command struct
7744 * This function handles the failure of a Mode Sense to the IOAFP.
7745 * Some adapters do not handle all mode pages.
7748 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7750 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7752 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Page 0x24 is optional: an invalid-request IOASC just means the adapter
 * has no dual-IOA page, so fall through to the page 28 sense. */
7754 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7755 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7756 return IPR_RC_JOB_CONTINUE;
7759 return ipr_reset_cmd_failed(ipr_cmd);
7763 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7764 * @ipr_cmd: ipr command struct
7766 * This function send a mode sense to the IOA to retrieve
7767 * the IOA Advanced Function Control mode page.
7772 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7774 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Sense page 0x24 (IOA Advanced Function Control) into the VPD buffer. */
7777 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7778 0x24, ioa_cfg->vpd_cbs_dma +
7779 offsetof(struct ipr_misc_cbs, mode_pages),
7780 sizeof(struct ipr_mode_pages));
7782 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7783 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7785 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7788 return IPR_RC_JOB_RETURN;
7792 * ipr_init_res_table - Initialize the resource table
7793 * @ipr_cmd: ipr command struct
7795 * This function looks through the existing resource table, comparing
7796 * it with the config table. This function will take care of old/new
7797 * devices and schedule adding/removing them from the mid-layer
7801 * IPR_RC_JOB_CONTINUE
7803 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7805 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7806 struct ipr_resource_entry *res, *temp;
7807 struct ipr_config_table_entry_wrapper cfgtew;
7808 int entries, found, flag, i;
/* Config table layout differs between SIS-64 and legacy adapters. */
7813 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7815 flag = ioa_cfg->u.cfg_table->hdr.flags;
7817 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7818 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
/* Move everything currently in use onto a temporary list; entries still
 * present in the new config table will be moved back below. */
7820 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7821 list_move_tail(&res->queue, &old_res);
7824 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7826 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7828 for (i = 0; i < entries; i++) {
7830 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7832 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
/* Try to match this config-table entry to a previously-known resource. */
7835 list_for_each_entry_safe(res, temp, &old_res, queue) {
7836 if (ipr_is_same_device(res, &cfgtew)) {
7837 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
/* Unknown device: allocate a free resource entry for it. */
7844 if (list_empty(&ioa_cfg->free_res_q)) {
7845 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7850 res = list_entry(ioa_cfg->free_res_q.next,
7851 struct ipr_resource_entry, queue);
7852 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7853 ipr_init_res_entry(res, &cfgtew);
7855 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7856 res->sdev->allow_restart = 1;
7859 ipr_update_res_entry(res, &cfgtew);
/* Anything left on old_res disappeared from the config table: schedule
 * midlayer removal if it had an sdev, otherwise free it immediately. */
7862 list_for_each_entry_safe(res, temp, &old_res, queue) {
7864 res->del_from_ml = 1;
7865 res->res_handle = IPR_INVALID_RES_HANDLE;
7866 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7870 list_for_each_entry_safe(res, temp, &old_res, queue) {
7871 ipr_clear_res_target(res);
7872 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
/* Dual-IOA-capable adapters get the extra page 0x24 step first. */
7875 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7876 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7878 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7881 return IPR_RC_JOB_CONTINUE;
7885 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7886 * @ipr_cmd: ipr command struct
7888 * This function sends a Query IOA Configuration command
7889 * to the adapter to retrieve the IOA configuration table.
7894 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7896 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7897 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7898 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7899 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
/* Latch dual-IOA RAID capability discovered by the earlier cap inquiry. */
7902 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7903 ioa_cfg->dual_raid = 1;
7904 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7905 ucode_vpd->major_release, ucode_vpd->card_type,
7906 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7907 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7908 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
/* CDB bytes 6-8 carry the 24-bit config table size, big-endian. */
7910 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7911 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7912 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7913 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7915 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7916 IPR_IOADL_FLAGS_READ_LAST);
7918 ipr_cmd->job_step = ipr_init_res_table;
7920 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7923 return IPR_RC_JOB_RETURN;
/* Failure hook for IOA service actions: an invalid-request IOASC means the
 * action is simply unsupported, so continue the reset job; anything else
 * is a real failure. */
7926 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7928 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7930 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7931 return IPR_RC_JOB_CONTINUE;
7933 return ipr_reset_cmd_failed(ipr_cmd);
/* Build an IOA Service Action CDB (opcode IPR_IOA_SERVICE_ACTION) with the
 * given service action code in byte 1. */
7936 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7937 __be32 res_handle, u8 sa_code)
7939 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7941 ioarcb->res_handle = res_handle;
7942 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7943 ioarcb->cmd_pkt.cdb[1] = sa_code;
7944 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7948 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7954 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7956 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7957 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7958 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7962 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
/* Only issue the cache-parameter change if page 0xC4 advertised support. */
7964 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7965 ipr_build_ioa_service_action(ipr_cmd,
7966 cpu_to_be32(IPR_IOA_RES_HANDLE),
7967 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
/* NOTE(review): 0x40 selects the cache parameter being changed —
 * meaning defined by the adapter firmware spec, not visible here. */
7969 ioarcb->cmd_pkt.cdb[2] = 0x40;
7971 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7972 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7973 IPR_SET_SUP_DEVICE_TIMEOUT);
7976 return IPR_RC_JOB_RETURN;
/* No sync-cache capability: fall straight through to the next job step. */
7980 return IPR_RC_JOB_CONTINUE;
7984 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7985 * @ipr_cmd: ipr command struct
7987 * This utility function sends an inquiry to the adapter.
7992 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7993 dma_addr_t dma_addr, u8 xfer_len)
7995 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
/* Build a standard INQUIRY CDB (flags=EVPD bit, page=VPD page code) and
 * send it as part of the reset job. */
7998 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7999 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8001 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
8002 ioarcb->cmd_pkt.cdb[1] = flags;
8003 ioarcb->cmd_pkt.cdb[2] = page;
8004 ioarcb->cmd_pkt.cdb[4] = xfer_len;
8006 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
8008 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
8013 * ipr_inquiry_page_supported - Is the given inquiry page supported
8014 * @page0: inquiry page 0 buffer
8017 * This function determines if the specified inquiry page is supported.
8020 * 1 if page is supported / 0 if not
8022 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
/* Linear scan of the page 0 supported-pages list, bounded by both the
 * reported length and the buffer capacity. */
8026 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8027 if (page0->page[i] == page)
8034 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8035 * @ipr_cmd: ipr command struct
8037 * This function sends a Page 0xC4 inquiry to the adapter
8038 * to retrieve software VPD information.
8041 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8043 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8045 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8046 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8047 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
/* Clear the buffer first so an unsupported page reads as all-zero caps. */
8050 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8051 memset(pageC4, 0, sizeof(*pageC4));
8053 if (ipr_inquiry_page_supported(page0, 0xC4)) {
8054 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8055 (ioa_cfg->vpd_cbs_dma
8056 + offsetof(struct ipr_misc_cbs,
8058 sizeof(struct ipr_inquiry_pageC4));
8059 return IPR_RC_JOB_RETURN;
/* Page not supported: skip the inquiry and continue the job inline. */
8063 return IPR_RC_JOB_CONTINUE;
8067 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8068 * @ipr_cmd: ipr command struct
8070 * This function sends a Page 0xD0 inquiry to the adapter
8071 * to retrieve adapter capabilities.
8074 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8076 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8078 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8079 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8080 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
/* Clear the buffer first so an unsupported page reads as no capabilities. */
8083 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8084 memset(cap, 0, sizeof(*cap));
8086 if (ipr_inquiry_page_supported(page0, 0xD0)) {
8087 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8088 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8089 sizeof(struct ipr_inquiry_cap));
8090 return IPR_RC_JOB_RETURN;
/* Page not supported: skip the inquiry and continue the job inline. */
8094 return IPR_RC_JOB_CONTINUE;
8098 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8099 * @ipr_cmd: ipr command struct
8101 * This function sends a Page 3 inquiry to the adapter
8102 * to retrieve software VPD information.
8105 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8107 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8109 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* VPD page 3 carries the adapter microcode version, reported later by
 * ipr_ioafp_query_ioa_cfg(). */
8113 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8115 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8116 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8117 sizeof(struct ipr_inquiry_page3));
8120 return IPR_RC_JOB_RETURN;
8124 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8125 * @ipr_cmd: ipr command struct
8127 * This function sends a Page 0 inquiry to the adapter
8128 * to retrieve supported inquiry pages.
8131 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8133 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8135 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8140 /* Grab the type out of the VPD and store it away */
8141 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
/* The 4-char product id (e.g. "5702") is parsed as a hex adapter type. */
8143 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8145 if (ipr_invalid_adapter(ioa_cfg)) {
8146 dev_err(&ioa_cfg->pdev->dev,
8147 "Adapter not supported in this hardware configuration.\n");
/* Unless test mode overrides, burn all retries and reset to shut down. */
8149 if (!ipr_testmode) {
8150 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8151 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8152 list_add_tail(&ipr_cmd->queue,
8153 &ioa_cfg->hrrq->hrrq_free_q);
8154 return IPR_RC_JOB_RETURN;
8158 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8160 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8161 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8162 sizeof(struct ipr_inquiry_page0));
8165 return IPR_RC_JOB_RETURN;
8169 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8170 * @ipr_cmd: ipr command struct
8172 * This function sends a standard inquiry to the adapter.
8177 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8179 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Standard (non-EVPD) inquiry into the IOA VPD buffer; the product id it
 * returns is consumed by ipr_ioafp_page0_inquiry(). */
8182 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8184 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8185 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8186 sizeof(struct ipr_ioa_vpd));
8189 return IPR_RC_JOB_RETURN;
8193 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8194 * @ipr_cmd: ipr command struct
8196 * This function send an Identify Host Request Response Queue
8197 * command to establish the HRRQ with the adapter.
8202 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8204 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8205 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8206 struct ipr_hrr_queue *hrrq;
8209 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8210 if (ioa_cfg->identify_hrrq_index == 0)
8211 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
/* One Identify HRRQ command is sent per queue; this step re-runs until
 * identify_hrrq_index reaches hrrq_num. */
8213 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8214 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8216 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8217 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8219 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8221 ioarcb->cmd_pkt.cdb[1] = 0x1;
/* Multi-vector setups enable HRRQ selection so responses are steered. */
8223 if (ioa_cfg->nvectors == 1)
8224 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8226 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
/* CDB bytes 2-5: low 32 bits of the queue DMA address, big-endian. */
8228 ioarcb->cmd_pkt.cdb[2] =
8229 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8230 ioarcb->cmd_pkt.cdb[3] =
8231 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8232 ioarcb->cmd_pkt.cdb[4] =
8233 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8234 ioarcb->cmd_pkt.cdb[5] =
8235 ((u64) hrrq->host_rrq_dma) & 0xff;
/* CDB bytes 7-8: queue size in bytes (u32 entries), big-endian. */
8236 ioarcb->cmd_pkt.cdb[7] =
8237 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8238 ioarcb->cmd_pkt.cdb[8] =
8239 (sizeof(u32) * hrrq->size) & 0xff;
8241 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8242 ioarcb->cmd_pkt.cdb[9] =
8243 ioa_cfg->identify_hrrq_index;
/* SIS-64: CDB bytes 10-13 carry the high 32 bits of the DMA address. */
8245 if (ioa_cfg->sis64) {
8246 ioarcb->cmd_pkt.cdb[10] =
8247 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8248 ioarcb->cmd_pkt.cdb[11] =
8249 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8250 ioarcb->cmd_pkt.cdb[12] =
8251 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8252 ioarcb->cmd_pkt.cdb[13] =
8253 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8256 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8257 ioarcb->cmd_pkt.cdb[14] =
8258 ioa_cfg->identify_hrrq_index;
8260 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8261 IPR_INTERNAL_TIMEOUT);
/* More queues to identify: loop this job step again. */
8263 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8264 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8267 return IPR_RC_JOB_RETURN;
8271 return IPR_RC_JOB_CONTINUE;
8275 * ipr_reset_timer_done - Adapter reset timer function
8276 * @ipr_cmd: ipr command struct
8278 * Description: This function is used in adapter reset processing
8279 * for timing events. If the reset_cmd pointer in the IOA
8280 * config struct is not this adapter's we are doing nested
8281 * resets and fail_all_ops will take care of freeing the
8287 static void ipr_reset_timer_done(struct timer_list *t)
8289 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8290 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8291 unsigned long lock_flags = 0;
8293 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8295 if (ioa_cfg->reset_cmd == ipr_cmd) {
8296 list_del(&ipr_cmd->queue);
8297 ipr_cmd->done(ipr_cmd);
8300 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8304 * ipr_reset_start_timer - Start a timer for adapter reset job
8305 * @ipr_cmd: ipr command struct
8306 * @timeout: timeout value
8308 * Description: This function is used in adapter reset processing
8309 * for timing events. If the reset_cmd pointer in the IOA
8310 * config struct is not this adapter's we are doing nested
8311 * resets and fail_all_ops will take care of freeing the
8317 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8318 unsigned long timeout)
8322 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8323 ipr_cmd->done = ipr_reset_ioa_job;
8325 ipr_cmd->timer.expires = jiffies + timeout;
8326 ipr_cmd->timer.function = ipr_reset_timer_done;
8327 add_timer(&ipr_cmd->timer);
8331 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8332 * @ioa_cfg: ioa cfg struct
8337 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8339 struct ipr_hrr_queue *hrrq;
8341 for_each_hrrq(hrrq, ioa_cfg) {
8342 spin_lock(&hrrq->_lock);
8343 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8345 /* Initialize Host RRQ pointers */
8346 hrrq->hrrq_start = hrrq->host_rrq;
8347 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8348 hrrq->hrrq_curr = hrrq->hrrq_start;
8349 hrrq->toggle_bit = 1;
8350 spin_unlock(&hrrq->_lock);
8354 ioa_cfg->identify_hrrq_index = 0;
8355 if (ioa_cfg->hrrq_num == 1)
8356 atomic_set(&ioa_cfg->hrrq_index, 0);
8358 atomic_set(&ioa_cfg->hrrq_index, 1);
8360 /* Zero out config table */
8361 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8365 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8366 * @ipr_cmd: ipr command struct
8369 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8371 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8373 unsigned long stage, stage_time;
8375 volatile u32 int_reg;
8376 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8379 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8380 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8381 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8383 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8385 /* sanity check the stage_time value */
8386 if (stage_time == 0)
8387 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8388 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8389 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8390 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8391 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8393 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8394 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8395 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8396 stage_time = ioa_cfg->transop_timeout;
8397 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8398 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8399 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8400 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8401 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8402 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8403 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8404 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8405 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8406 return IPR_RC_JOB_CONTINUE;
8410 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8411 ipr_cmd->timer.function = ipr_oper_timeout;
8412 ipr_cmd->done = ipr_reset_ioa_job;
8413 add_timer(&ipr_cmd->timer);
8415 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8417 return IPR_RC_JOB_RETURN;
8421 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8422 * @ipr_cmd: ipr command struct
8424 * This function reinitializes some control blocks and
8425 * enables destructive diagnostics on the adapter.
8430 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8432 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8433 volatile u32 int_reg;
8434 volatile u64 maskval;
8438 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8439 ipr_init_ioa_mem(ioa_cfg);
8441 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8442 spin_lock(&ioa_cfg->hrrq[i]._lock);
8443 ioa_cfg->hrrq[i].allow_interrupts = 1;
8444 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8446 if (ioa_cfg->sis64) {
8447 /* Set the adapter to the correct endian mode. */
8448 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8449 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8452 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8454 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8455 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8456 ioa_cfg->regs.clr_interrupt_mask_reg32);
8457 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8458 return IPR_RC_JOB_CONTINUE;
8461 /* Enable destructive diagnostics on IOA */
8462 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8464 if (ioa_cfg->sis64) {
8465 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8466 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8467 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8469 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8471 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8473 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8475 if (ioa_cfg->sis64) {
8476 ipr_cmd->job_step = ipr_reset_next_stage;
8477 return IPR_RC_JOB_CONTINUE;
8480 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8481 ipr_cmd->timer.function = ipr_oper_timeout;
8482 ipr_cmd->done = ipr_reset_ioa_job;
8483 add_timer(&ipr_cmd->timer);
8484 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8487 return IPR_RC_JOB_RETURN;
8491 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8492 * @ipr_cmd: ipr command struct
8494 * This function is invoked when an adapter dump has run out
8495 * of processing time.
8498 * IPR_RC_JOB_CONTINUE
8500 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8502 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8504 if (ioa_cfg->sdt_state == GET_DUMP)
8505 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8506 else if (ioa_cfg->sdt_state == READ_DUMP)
8507 ioa_cfg->sdt_state = ABORT_DUMP;
8509 ioa_cfg->dump_timeout = 1;
8510 ipr_cmd->job_step = ipr_reset_alert;
8512 return IPR_RC_JOB_CONTINUE;
8516 * ipr_unit_check_no_data - Log a unit check/no data error log
8517 * @ioa_cfg: ioa config struct
8519 * Logs an error indicating the adapter unit checked, but for some
8520 * reason, we were unable to fetch the unit check buffer.
8525 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8527 ioa_cfg->errors_logged++;
8528 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8532 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8533 * @ioa_cfg: ioa config struct
8535 * Fetches the unit check buffer from the adapter by clocking the data
8536 * through the mailbox register.
8541 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8543 unsigned long mailbox;
8544 struct ipr_hostrcb *hostrcb;
8545 struct ipr_uc_sdt sdt;
8549 mailbox = readl(ioa_cfg->ioa_mailbox);
8551 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8552 ipr_unit_check_no_data(ioa_cfg);
8556 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8557 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8558 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8560 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8561 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8562 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8563 ipr_unit_check_no_data(ioa_cfg);
8567 /* Find length of the first sdt entry (UC buffer) */
8568 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8569 length = be32_to_cpu(sdt.entry[0].end_token);
8571 length = (be32_to_cpu(sdt.entry[0].end_token) -
8572 be32_to_cpu(sdt.entry[0].start_token)) &
8573 IPR_FMT2_MBX_ADDR_MASK;
8575 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8576 struct ipr_hostrcb, queue);
8577 list_del_init(&hostrcb->queue);
8578 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8580 rc = ipr_get_ldump_data_section(ioa_cfg,
8581 be32_to_cpu(sdt.entry[0].start_token),
8582 (__be32 *)&hostrcb->hcam,
8583 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8586 ipr_handle_log_data(ioa_cfg, hostrcb);
8587 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8588 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8589 ioa_cfg->sdt_state == GET_DUMP)
8590 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8592 ipr_unit_check_no_data(ioa_cfg);
8594 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8598 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8599 * @ipr_cmd: ipr command struct
8601 * Description: This function will call to get the unit check buffer.
8606 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8608 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8611 ioa_cfg->ioa_unit_checked = 0;
8612 ipr_get_unit_check_buffer(ioa_cfg);
8613 ipr_cmd->job_step = ipr_reset_alert;
8614 ipr_reset_start_timer(ipr_cmd, 0);
8617 return IPR_RC_JOB_RETURN;
8620 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8622 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8626 if (ioa_cfg->sdt_state != GET_DUMP)
8627 return IPR_RC_JOB_RETURN;
8629 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8630 (readl(ioa_cfg->regs.sense_interrupt_reg) &
8631 IPR_PCII_MAILBOX_STABLE)) {
8633 if (!ipr_cmd->u.time_left)
8634 dev_err(&ioa_cfg->pdev->dev,
8635 "Timed out waiting for Mailbox register.\n");
8637 ioa_cfg->sdt_state = READ_DUMP;
8638 ioa_cfg->dump_timeout = 0;
8640 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8642 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8643 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8644 schedule_work(&ioa_cfg->work_q);
8647 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8648 ipr_reset_start_timer(ipr_cmd,
8649 IPR_CHECK_FOR_RESET_TIMEOUT);
8653 return IPR_RC_JOB_RETURN;
8657 * ipr_reset_restore_cfg_space - Restore PCI config space.
8658 * @ipr_cmd: ipr command struct
8660 * Description: This function restores the saved PCI config space of
8661 * the adapter, fails all outstanding ops back to the callers, and
8662 * fetches the dump/unit check if applicable to this reset.
8665 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8667 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8669 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8673 ioa_cfg->pdev->state_saved = true;
8674 pci_restore_state(ioa_cfg->pdev);
8676 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8677 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8678 return IPR_RC_JOB_CONTINUE;
8681 ipr_fail_all_ops(ioa_cfg);
8683 if (ioa_cfg->sis64) {
8684 /* Set the adapter to the correct endian mode. */
8685 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8686 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8689 if (ioa_cfg->ioa_unit_checked) {
8690 if (ioa_cfg->sis64) {
8691 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8692 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8693 return IPR_RC_JOB_RETURN;
8695 ioa_cfg->ioa_unit_checked = 0;
8696 ipr_get_unit_check_buffer(ioa_cfg);
8697 ipr_cmd->job_step = ipr_reset_alert;
8698 ipr_reset_start_timer(ipr_cmd, 0);
8699 return IPR_RC_JOB_RETURN;
8703 if (ioa_cfg->in_ioa_bringdown) {
8704 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8705 } else if (ioa_cfg->sdt_state == GET_DUMP) {
8706 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8707 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8709 ipr_cmd->job_step = ipr_reset_enable_ioa;
8713 return IPR_RC_JOB_CONTINUE;
8717 * ipr_reset_bist_done - BIST has completed on the adapter.
8718 * @ipr_cmd: ipr command struct
8720 * Description: Unblock config space and resume the reset process.
8723 * IPR_RC_JOB_CONTINUE
8725 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8727 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8730 if (ioa_cfg->cfg_locked)
8731 pci_cfg_access_unlock(ioa_cfg->pdev);
8732 ioa_cfg->cfg_locked = 0;
8733 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8735 return IPR_RC_JOB_CONTINUE;
8739 * ipr_reset_start_bist - Run BIST on the adapter.
8740 * @ipr_cmd: ipr command struct
8742 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8745 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8747 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8749 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8750 int rc = PCIBIOS_SUCCESSFUL;
8753 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8754 writel(IPR_UPROCI_SIS64_START_BIST,
8755 ioa_cfg->regs.set_uproc_interrupt_reg32);
8757 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8759 if (rc == PCIBIOS_SUCCESSFUL) {
8760 ipr_cmd->job_step = ipr_reset_bist_done;
8761 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8762 rc = IPR_RC_JOB_RETURN;
8764 if (ioa_cfg->cfg_locked)
8765 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8766 ioa_cfg->cfg_locked = 0;
8767 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8768 rc = IPR_RC_JOB_CONTINUE;
8776 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8777 * @ipr_cmd: ipr command struct
8779 * Description: This clears PCI reset to the adapter and delays two seconds.
8784 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8787 ipr_cmd->job_step = ipr_reset_bist_done;
8788 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8790 return IPR_RC_JOB_RETURN;
8794 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8795 * @work: work struct
8797 * Description: This pulses warm reset to a slot.
8800 static void ipr_reset_reset_work(struct work_struct *work)
8802 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8803 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8804 struct pci_dev *pdev = ioa_cfg->pdev;
8805 unsigned long lock_flags = 0;
8808 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8809 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8810 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8812 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8813 if (ioa_cfg->reset_cmd == ipr_cmd)
8814 ipr_reset_ioa_job(ipr_cmd);
8815 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8820 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8821 * @ipr_cmd: ipr command struct
8823 * Description: This asserts PCI reset to the adapter.
8828 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8830 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8833 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8834 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8835 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8837 return IPR_RC_JOB_RETURN;
8841 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8842 * @ipr_cmd: ipr command struct
8844 * Description: This attempts to block config access to the IOA.
8847 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8849 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8851 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8852 int rc = IPR_RC_JOB_CONTINUE;
8854 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8855 ioa_cfg->cfg_locked = 1;
8856 ipr_cmd->job_step = ioa_cfg->reset;
8858 if (ipr_cmd->u.time_left) {
8859 rc = IPR_RC_JOB_RETURN;
8860 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8861 ipr_reset_start_timer(ipr_cmd,
8862 IPR_CHECK_FOR_RESET_TIMEOUT);
8864 ipr_cmd->job_step = ioa_cfg->reset;
8865 dev_err(&ioa_cfg->pdev->dev,
8866 "Timed out waiting to lock config access. Resetting anyway.\n");
8874 * ipr_reset_block_config_access - Block config access to the IOA
8875 * @ipr_cmd: ipr command struct
8877 * Description: This attempts to block config access to the IOA
8880 * IPR_RC_JOB_CONTINUE
8882 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8884 ipr_cmd->ioa_cfg->cfg_locked = 0;
8885 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8886 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8887 return IPR_RC_JOB_CONTINUE;
8891 * ipr_reset_allowed - Query whether or not IOA can be reset
8892 * @ioa_cfg: ioa config struct
8895 * 0 if reset not allowed / non-zero if reset is allowed
8897 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8899 volatile u32 temp_reg;
8901 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8902 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8906 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8907 * @ipr_cmd: ipr command struct
8909 * Description: This function waits for adapter permission to run BIST,
8910 * then runs BIST. If the adapter does not give permission after a
8911 * reasonable time, we will reset the adapter anyway. The impact of
8912 * resetting the adapter without warning the adapter is the risk of
8913 * losing the persistent error log on the adapter. If the adapter is
8914 * reset while it is writing to the flash on the adapter, the flash
8915 * segment will have bad ECC and be zeroed.
8918 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8920 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8922 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8923 int rc = IPR_RC_JOB_RETURN;
8925 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8926 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8927 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8929 ipr_cmd->job_step = ipr_reset_block_config_access;
8930 rc = IPR_RC_JOB_CONTINUE;
8937 * ipr_reset_alert - Alert the adapter of a pending reset
8938 * @ipr_cmd: ipr command struct
8940 * Description: This function alerts the adapter that it will be reset.
8941 * If memory space is not currently enabled, proceed directly
8942 * to running BIST on the adapter. The timer must always be started
8943 * so we guarantee we do not run BIST from ipr_isr.
8948 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8950 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8955 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8957 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8958 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8959 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8960 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8962 ipr_cmd->job_step = ipr_reset_block_config_access;
8965 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8966 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8969 return IPR_RC_JOB_RETURN;
8973 * ipr_reset_quiesce_done - Complete IOA disconnect
8974 * @ipr_cmd: ipr command struct
8976 * Description: Freeze the adapter to complete quiesce processing
8979 * IPR_RC_JOB_CONTINUE
8981 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8983 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8986 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8987 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8989 return IPR_RC_JOB_CONTINUE;
8993 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8994 * @ipr_cmd: ipr command struct
8996 * Description: Ensure nothing is outstanding to the IOA and
8997 * proceed with IOA disconnect. Otherwise reset the IOA.
9000 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
9002 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
9004 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9005 struct ipr_cmnd *loop_cmd;
9006 struct ipr_hrr_queue *hrrq;
9007 int rc = IPR_RC_JOB_CONTINUE;
9011 ipr_cmd->job_step = ipr_reset_quiesce_done;
9013 for_each_hrrq(hrrq, ioa_cfg) {
9014 spin_lock(&hrrq->_lock);
9015 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9017 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9018 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9019 rc = IPR_RC_JOB_RETURN;
9022 spin_unlock(&hrrq->_lock);
9033 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9034 * @ipr_cmd: ipr command struct
9036 * Description: Cancel any oustanding HCAMs to the IOA.
9039 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9041 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9043 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9044 int rc = IPR_RC_JOB_CONTINUE;
9045 struct ipr_cmd_pkt *cmd_pkt;
9046 struct ipr_cmnd *hcam_cmd;
9047 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9050 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9052 if (!hrrq->ioa_is_dead) {
9053 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9054 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9055 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9058 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9059 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9060 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9061 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9062 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9063 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9064 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9065 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9066 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9067 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9068 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9069 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9070 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9071 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9073 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9074 IPR_CANCEL_TIMEOUT);
9076 rc = IPR_RC_JOB_RETURN;
9077 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9082 ipr_cmd->job_step = ipr_reset_alert;
9089 * ipr_reset_ucode_download_done - Microcode download completion
9090 * @ipr_cmd: ipr command struct
9092 * Description: This function unmaps the microcode download buffer.
9095 * IPR_RC_JOB_CONTINUE
9097 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9099 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9100 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9102 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9103 sglist->num_sg, DMA_TO_DEVICE);
9105 ipr_cmd->job_step = ipr_reset_alert;
9106 return IPR_RC_JOB_CONTINUE;
9110 * ipr_reset_ucode_download - Download microcode to the adapter
9111 * @ipr_cmd: ipr command struct
9113 * Description: This function checks to see if it there is microcode
9114 * to download to the adapter. If there is, a download is performed.
9117 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9119 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9121 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9122 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9125 ipr_cmd->job_step = ipr_reset_alert;
9128 return IPR_RC_JOB_CONTINUE;
9130 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9131 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9132 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9133 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9134 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9135 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9136 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9139 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9141 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9142 ipr_cmd->job_step = ipr_reset_ucode_download_done;
9144 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9145 IPR_WRITE_BUFFER_TIMEOUT);
9148 return IPR_RC_JOB_RETURN;
9152 * ipr_reset_shutdown_ioa - Shutdown the adapter
9153 * @ipr_cmd: ipr command struct
9155 * Description: This function issues an adapter shutdown of the
9156 * specified type to the specified adapter as part of the
9157 * adapter reset job.
9160 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9162 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9164 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9165 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9166 unsigned long timeout;
9167 int rc = IPR_RC_JOB_CONTINUE;
9170 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9171 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9172 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9173 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9174 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9175 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9176 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9177 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9179 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9180 timeout = IPR_SHUTDOWN_TIMEOUT;
9181 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9182 timeout = IPR_INTERNAL_TIMEOUT;
9183 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9184 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9186 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9188 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9190 rc = IPR_RC_JOB_RETURN;
9191 ipr_cmd->job_step = ipr_reset_ucode_download;
9193 ipr_cmd->job_step = ipr_reset_alert;
9200 * ipr_reset_ioa_job - Adapter reset job
9201 * @ipr_cmd: ipr command struct
9203 * Description: This function is the job router for the adapter reset job.
9208 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9211 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9214 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9216 if (ioa_cfg->reset_cmd != ipr_cmd) {
9218 * We are doing nested adapter resets and this is
9219 * not the current reset job.
9221 list_add_tail(&ipr_cmd->queue,
9222 &ipr_cmd->hrrq->hrrq_free_q);
9226 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9227 rc = ipr_cmd->job_step_failed(ipr_cmd);
9228 if (rc == IPR_RC_JOB_RETURN)
9232 ipr_reinit_ipr_cmnd(ipr_cmd);
9233 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9234 rc = ipr_cmd->job_step(ipr_cmd);
9235 } while (rc == IPR_RC_JOB_CONTINUE);
9239 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9240 * @ioa_cfg: ioa config struct
9241 * @job_step: first job step of reset job
9242 * @shutdown_type: shutdown type
9244 * Description: This function will initiate the reset of the given adapter
9245 * starting at the selected job step.
9246 * If the caller needs to wait on the completion of the reset,
9247 * the caller must sleep on the reset_wait_q.
9252 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9253 int (*job_step) (struct ipr_cmnd *),
9254 enum ipr_shutdown_type shutdown_type)
9256 struct ipr_cmnd *ipr_cmd;
9259 ioa_cfg->in_reset_reload = 1;
9260 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9261 spin_lock(&ioa_cfg->hrrq[i]._lock);
9262 ioa_cfg->hrrq[i].allow_cmds = 0;
9263 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9266 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9267 ioa_cfg->scsi_unblock = 0;
9268 ioa_cfg->scsi_blocked = 1;
9269 scsi_block_requests(ioa_cfg->host);
9272 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9273 ioa_cfg->reset_cmd = ipr_cmd;
9274 ipr_cmd->job_step = job_step;
9275 ipr_cmd->u.shutdown_type = shutdown_type;
9277 ipr_reset_ioa_job(ipr_cmd);
9281 * ipr_initiate_ioa_reset - Initiate an adapter reset
9282 * @ioa_cfg: ioa config struct
9283 * @shutdown_type: shutdown type
9285 * Description: This function will initiate the reset of the given adapter.
9286 * If the caller needs to wait on the completion of the reset,
9287 * the caller must sleep on the reset_wait_q.
9292 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9293 enum ipr_shutdown_type shutdown_type)
9297 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9300 if (ioa_cfg->in_reset_reload) {
9301 if (ioa_cfg->sdt_state == GET_DUMP)
9302 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9303 else if (ioa_cfg->sdt_state == READ_DUMP)
9304 ioa_cfg->sdt_state = ABORT_DUMP;
9307 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9308 dev_err(&ioa_cfg->pdev->dev,
9309 "IOA taken offline - error recovery failed\n");
9311 ioa_cfg->reset_retries = 0;
9312 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9313 spin_lock(&ioa_cfg->hrrq[i]._lock);
9314 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9315 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9319 if (ioa_cfg->in_ioa_bringdown) {
9320 ioa_cfg->reset_cmd = NULL;
9321 ioa_cfg->in_reset_reload = 0;
9322 ipr_fail_all_ops(ioa_cfg);
9323 wake_up_all(&ioa_cfg->reset_wait_q);
9325 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9326 ioa_cfg->scsi_unblock = 1;
9327 schedule_work(&ioa_cfg->work_q);
9331 ioa_cfg->in_ioa_bringdown = 1;
9332 shutdown_type = IPR_SHUTDOWN_NONE;
9336 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9341 * ipr_reset_freeze - Hold off all I/O activity
9342 * @ipr_cmd: ipr command struct
9344 * Description: If the PCI slot is frozen, hold off all I/O
9345 * activity; then, as soon as the slot is available again,
9346 * initiate an adapter reset.
9348 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9350 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9353 /* Disallow new interrupts, avoid loop */
9354 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9355 spin_lock(&ioa_cfg->hrrq[i]._lock);
9356 ioa_cfg->hrrq[i].allow_interrupts = 0;
9357 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9360 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9361 ipr_cmd->done = ipr_reset_ioa_job;
9362 return IPR_RC_JOB_RETURN;
9366 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9367 * @pdev: PCI device struct
9369 * Description: This routine is called to tell us that the MMIO
9370 * access to the IOA has been restored
9372 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9374 unsigned long flags = 0;
9375 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9377 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9378 if (!ioa_cfg->probe_done)
9379 pci_save_state(pdev);
9380 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9381 return PCI_ERS_RESULT_NEED_RESET;
9385 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9386 * @pdev: PCI device struct
9388 * Description: This routine is called to tell us that the PCI bus
9389 * is down. Can't do anything here, except put the device driver
9390 * into a holding pattern, waiting for the PCI bus to come back.
9392 static void ipr_pci_frozen(struct pci_dev *pdev)
9394 unsigned long flags = 0;
9395 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9397 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9398 if (ioa_cfg->probe_done)
9399 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9400 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9404 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9405 * @pdev: PCI device struct
9407 * Description: This routine is called by the pci error recovery
9408 * code after the PCI slot has been reset, just before we
9409 * should resume normal operations.
9411 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9413 unsigned long flags = 0;
9414 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9416 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9417 if (ioa_cfg->probe_done) {
9418 if (ioa_cfg->needs_warm_reset)
9419 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9421 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9424 wake_up_all(&ioa_cfg->eeh_wait_q);
9425 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9426 return PCI_ERS_RESULT_RECOVERED;
9430 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9431 * @pdev: PCI device struct
9433 * Description: This routine is called when the PCI bus has
9434 * permanently failed.
9436 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9438 unsigned long flags = 0;
9439 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9442 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9443 if (ioa_cfg->probe_done) {
9444 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9445 ioa_cfg->sdt_state = ABORT_DUMP;
9446 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9447 ioa_cfg->in_ioa_bringdown = 1;
9448 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9449 spin_lock(&ioa_cfg->hrrq[i]._lock);
9450 ioa_cfg->hrrq[i].allow_cmds = 0;
9451 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9454 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9456 wake_up_all(&ioa_cfg->eeh_wait_q);
9457 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9461 * ipr_pci_error_detected - Called when a PCI error is detected.
9462 * @pdev: PCI device struct
9463 * @state: PCI channel state
9465 * Description: Called when a PCI error is detected.
9468 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9470 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9471 pci_channel_state_t state)
9474 case pci_channel_io_frozen:
9475 ipr_pci_frozen(pdev);
9476 return PCI_ERS_RESULT_CAN_RECOVER;
9477 case pci_channel_io_perm_failure:
9478 ipr_pci_perm_failure(pdev);
9479 return PCI_ERS_RESULT_DISCONNECT;
9484 return PCI_ERS_RESULT_NEED_RESET;
9488 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9489 * @ioa_cfg: ioa cfg struct
9491 * Description: This is the second phase of adapter initialization
9492 * This function takes care of initilizing the adapter to the point
9493 * where it can accept new commands.
9496 * 0 on success / -EIO on failure
9498 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9501 unsigned long host_lock_flags = 0;
9504 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9505 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9506 ioa_cfg->probe_done = 1;
9507 if (ioa_cfg->needs_hard_reset) {
9508 ioa_cfg->needs_hard_reset = 0;
9509 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9511 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9513 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9520 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9521 * @ioa_cfg: ioa config struct
9526 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9530 if (ioa_cfg->ipr_cmnd_list) {
9531 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9532 if (ioa_cfg->ipr_cmnd_list[i])
9533 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9534 ioa_cfg->ipr_cmnd_list[i],
9535 ioa_cfg->ipr_cmnd_list_dma[i]);
9537 ioa_cfg->ipr_cmnd_list[i] = NULL;
9541 if (ioa_cfg->ipr_cmd_pool)
9542 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9544 kfree(ioa_cfg->ipr_cmnd_list);
9545 kfree(ioa_cfg->ipr_cmnd_list_dma);
9546 ioa_cfg->ipr_cmnd_list = NULL;
9547 ioa_cfg->ipr_cmnd_list_dma = NULL;
9548 ioa_cfg->ipr_cmd_pool = NULL;
9552 * ipr_free_mem - Frees memory allocated for an adapter
9553 * @ioa_cfg: ioa cfg struct
9558 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9562 kfree(ioa_cfg->res_entries);
9563 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9564 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9565 ipr_free_cmd_blks(ioa_cfg);
9567 for (i = 0; i < ioa_cfg->hrrq_num; i++)
9568 dma_free_coherent(&ioa_cfg->pdev->dev,
9569 sizeof(u32) * ioa_cfg->hrrq[i].size,
9570 ioa_cfg->hrrq[i].host_rrq,
9571 ioa_cfg->hrrq[i].host_rrq_dma);
9573 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9574 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9576 for (i = 0; i < IPR_MAX_HCAMS; i++) {
9577 dma_free_coherent(&ioa_cfg->pdev->dev,
9578 sizeof(struct ipr_hostrcb),
9579 ioa_cfg->hostrcb[i],
9580 ioa_cfg->hostrcb_dma[i]);
9583 ipr_free_dump(ioa_cfg);
9584 kfree(ioa_cfg->trace);
9588 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9589 * @ioa_cfg: ipr cfg struct
9591 * This function frees all allocated IRQs for the
9592 * specified adapter.
9597 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9599 struct pci_dev *pdev = ioa_cfg->pdev;
9602 for (i = 0; i < ioa_cfg->nvectors; i++)
9603 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9604 pci_free_irq_vectors(pdev);
9608 * ipr_free_all_resources - Free all allocated resources for an adapter.
9609 * @ipr_cmd: ipr command struct
9611 * This function frees all allocated resources for the
9612 * specified adapter.
9617 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9619 struct pci_dev *pdev = ioa_cfg->pdev;
9622 ipr_free_irqs(ioa_cfg);
9623 if (ioa_cfg->reset_work_q)
9624 destroy_workqueue(ioa_cfg->reset_work_q);
9625 iounmap(ioa_cfg->hdw_dma_regs);
9626 pci_release_regions(pdev);
9627 ipr_free_mem(ioa_cfg);
9628 scsi_host_put(ioa_cfg->host);
9629 pci_disable_device(pdev);
9634 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9635 * @ioa_cfg: ioa config struct
9638 * 0 on success / -ENOMEM on allocation failure
9640 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9642 struct ipr_cmnd *ipr_cmd;
9643 struct ipr_ioarcb *ioarcb;
9644 dma_addr_t dma_addr;
9645 int i, entries_each_hrrq, hrrq_id = 0;
/* DMA pool of command blocks, 512-byte aligned (see align arg below). */
9647 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9648 sizeof(struct ipr_cmnd), 512, 0);
9650 if (!ioa_cfg->ipr_cmd_pool)
/* Parallel arrays: virtual pointers and their DMA bus addresses. */
9653 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9654 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9656 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9657 ipr_free_cmd_blks(ioa_cfg);
/* Partition command ids among HRRQs: with multiple HRRQs, the first
 * takes IPR_NUM_INTERNAL_CMD_BLKS and the rest split
 * IPR_NUM_BASE_CMD_BLKS evenly (branch structure partly elided here —
 * presumably "if (i == 0) ... else ..."; confirm against full source). */
9661 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9662 if (ioa_cfg->hrrq_num > 1) {
9664 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9665 ioa_cfg->hrrq[i].min_cmd_id = 0;
9666 ioa_cfg->hrrq[i].max_cmd_id =
9667 (entries_each_hrrq - 1);
9670 IPR_NUM_BASE_CMD_BLKS/
9671 (ioa_cfg->hrrq_num - 1);
9672 ioa_cfg->hrrq[i].min_cmd_id =
9673 IPR_NUM_INTERNAL_CMD_BLKS +
9674 (i - 1) * entries_each_hrrq;
9675 ioa_cfg->hrrq[i].max_cmd_id =
9676 (IPR_NUM_INTERNAL_CMD_BLKS +
9677 i * entries_each_hrrq - 1);
/* Single HRRQ: it owns every command block. */
9680 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9681 ioa_cfg->hrrq[i].min_cmd_id = 0;
9682 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9684 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9687 BUG_ON(ioa_cfg->hrrq_num == 0);
/* Hand any leftover blocks (from integer division) to the last HRRQ. */
9689 i = IPR_NUM_CMD_BLKS -
9690 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9692 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9693 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
/* Allocate each block from the pool and wire up its IOARCB DMA fields. */
9696 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9697 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9698 GFP_KERNEL, &dma_addr);
/* On failure, ipr_free_cmd_blks() releases everything allocated so far. */
9701 ipr_free_cmd_blks(ioa_cfg);
9705 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9706 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9708 ioarcb = &ipr_cmd->ioarcb;
9709 ipr_cmd->dma_addr = dma_addr;
/* SIS64 uses a 64-bit IOARCB host address; SIS32 a 32-bit one. */
9711 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9713 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
/* Response handle encodes the command index (low 2 bits reserved). */
9715 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9716 if (ioa_cfg->sis64) {
9717 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9718 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9719 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9720 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9722 ioarcb->write_ioadl_addr =
9723 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9724 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9725 ioarcb->ioasa_host_pci_addr =
9726 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9728 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9729 ipr_cmd->cmd_index = i;
9730 ipr_cmd->ioa_cfg = ioa_cfg;
9731 ipr_cmd->sense_buffer_dma = dma_addr +
9732 offsetof(struct ipr_cmnd, sense_buffer);
/* Assign the block to its HRRQ and park it on that queue's free list. */
9734 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9735 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9736 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/* Move to the next HRRQ once this one's id range is exhausted. */
9737 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9745 * ipr_alloc_mem - Allocate memory for an adapter
9746 * @ioa_cfg: ioa config struct
9749 * 0 on success / non-zero for error
9751 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9753 struct pci_dev *pdev = ioa_cfg->pdev;
9754 int i, rc = -ENOMEM;
/* Resource table: one entry per supported device, all starting free. */
9757 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9758 sizeof(struct ipr_resource_entry),
9761 if (!ioa_cfg->res_entries)
9764 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9765 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9766 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
/* Miscellaneous control blocks (VPD etc.) shared with the IOA via DMA. */
9769 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9770 sizeof(struct ipr_misc_cbs),
9771 &ioa_cfg->vpd_cbs_dma,
9774 if (!ioa_cfg->vpd_cbs)
9775 goto out_free_res_entries;
9777 if (ipr_alloc_cmd_blks(ioa_cfg))
9778 goto out_free_vpd_cbs;
/* One host response ring (u32 per command slot) per HRRQ. */
9780 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9781 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9782 sizeof(u32) * ioa_cfg->hrrq[i].size,
9783 &ioa_cfg->hrrq[i].host_rrq_dma,
9786 if (!ioa_cfg->hrrq[i].host_rrq) {
/* Free rings allocated before this iteration (loop bound elided in this
 * listing — presumably walks the earlier indices; confirm in full source). */
9788 dma_free_coherent(&pdev->dev,
9789 sizeof(u32) * ioa_cfg->hrrq[i].size,
9790 ioa_cfg->hrrq[i].host_rrq,
9791 ioa_cfg->hrrq[i].host_rrq_dma);
9792 goto out_ipr_free_cmd_blocks;
9794 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
/* Device config table; size was computed in ipr_init_ioa_cfg(). */
9797 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9798 ioa_cfg->cfg_table_size,
9799 &ioa_cfg->cfg_table_dma,
9802 if (!ioa_cfg->u.cfg_table)
9803 goto out_free_host_rrq;
/* Host-controlled async message (HCAM) buffers. */
9805 for (i = 0; i < IPR_MAX_HCAMS; i++) {
9806 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9807 sizeof(struct ipr_hostrcb),
9808 &ioa_cfg->hostrcb_dma[i],
9811 if (!ioa_cfg->hostrcb[i])
9812 goto out_free_hostrcb_dma;
9814 ioa_cfg->hostrcb[i]->hostrcb_dma =
9815 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9816 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9817 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
/* Driver trace ring used for debug of command flow. */
9820 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9821 sizeof(struct ipr_trace_entry),
9824 if (!ioa_cfg->trace)
9825 goto out_free_hostrcb_dma;
/* Error unwind: release resources in reverse order of allocation. */
9832 out_free_hostrcb_dma:
9834 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9835 ioa_cfg->hostrcb[i],
9836 ioa_cfg->hostrcb_dma[i]);
9838 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9839 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9841 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9842 dma_free_coherent(&pdev->dev,
9843 sizeof(u32) * ioa_cfg->hrrq[i].size,
9844 ioa_cfg->hrrq[i].host_rrq,
9845 ioa_cfg->hrrq[i].host_rrq_dma);
9847 out_ipr_free_cmd_blocks:
9848 ipr_free_cmd_blks(ioa_cfg);
9850 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9851 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9852 out_free_res_entries:
9853 kfree(ioa_cfg->res_entries);
9858 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9859 * @ioa_cfg: ioa config struct
9864 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9868 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9869 ioa_cfg->bus_attr[i].bus = i;
9870 ioa_cfg->bus_attr[i].qas_enabled = 0;
9871 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9872 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9873 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9875 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9880 * ipr_init_regs - Initialize IOA registers
9881 * @ioa_cfg: ioa config struct
9886 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9888 const struct ipr_interrupt_offsets *p;
9889 struct ipr_interrupts *t;
9892 p = &ioa_cfg->chip_cfg->regs;
9894 base = ioa_cfg->hdw_dma_regs;
9896 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9897 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9898 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9899 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9900 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9901 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9902 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9903 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9904 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9905 t->ioarrin_reg = base + p->ioarrin_reg;
9906 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9907 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9908 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9909 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9910 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9911 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9913 if (ioa_cfg->sis64) {
9914 t->init_feedback_reg = base + p->init_feedback_reg;
9915 t->dump_addr_reg = base + p->dump_addr_reg;
9916 t->dump_data_reg = base + p->dump_data_reg;
9917 t->endian_swap_reg = base + p->endian_swap_reg;
9922 * ipr_init_ioa_cfg - Initialize IOA config struct
9923 * @ioa_cfg: ioa config struct
9924 * @host: scsi host struct
9925 * @pdev: PCI dev struct
9930 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9931 struct Scsi_Host *host, struct pci_dev *pdev)
9935 ioa_cfg->host = host;
9936 ioa_cfg->pdev = pdev;
9937 ioa_cfg->log_level = ipr_log_level;
9938 ioa_cfg->doorbell = IPR_DOORBELL;
/* Eye-catcher labels make driver structures easy to spot in a dump. */
9939 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9940 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9941 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9942 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9943 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9944 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
/* Queues, work items and wait queues used throughout the driver. */
9946 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9947 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9948 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9949 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9950 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9951 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9952 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9953 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9954 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9955 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9956 ioa_cfg->sdt_state = INACTIVE;
9958 ipr_initialize_bus_attr(ioa_cfg);
9959 ioa_cfg->max_devs_supported = ipr_max_devs;
/* SIS64 and SIS32 chips expose different topology limits and use
 * differently sized config table entries. */
9961 if (ioa_cfg->sis64) {
9962 host->max_channel = IPR_MAX_SIS64_BUSES;
9963 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9964 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9965 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9966 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9967 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9968 + ((sizeof(struct ipr_config_table_entry64)
9969 * ioa_cfg->max_devs_supported)));
9971 host->max_channel = IPR_VSET_BUS;
9972 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9973 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9974 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9975 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9976 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9977 + ((sizeof(struct ipr_config_table_entry)
9978 * ioa_cfg->max_devs_supported)));
9981 host->unique_id = host->host_no;
9982 host->max_cmd_len = IPR_MAX_CDB_LEN;
9983 host->can_queue = ioa_cfg->max_cmds;
9984 pci_set_drvdata(pdev, ioa_cfg);
/* Per-HRRQ queues and locks. The lock choice differs per queue (the
 * selecting condition is elided in this listing — presumably HRRQ 0 uses
 * the host lock and the rest their own _lock; confirm in full source). */
9986 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9987 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9988 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9989 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9991 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9993 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9998 * ipr_get_chip_info - Find adapter chip information
9999 * @dev_id: PCI device id struct
10002 * ptr to chip information on success / NULL on failure
10004 static const struct ipr_chip_t *
10005 ipr_get_chip_info(const struct pci_device_id *dev_id)
10009 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
10010 if (ipr_chip[i].vendor == dev_id->vendor &&
10011 ipr_chip[i].device == dev_id->device)
10012 return &ipr_chip[i];
10017 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10018 * during probe time
10019 * @ioa_cfg: ioa config struct
10024 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10026 struct pci_dev *pdev = ioa_cfg->pdev;
10028 if (pci_channel_offline(pdev)) {
10029 wait_event_timeout(ioa_cfg->eeh_wait_q,
10030 !pci_channel_offline(pdev),
10031 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10032 pci_restore_state(pdev);
10036 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10038 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10040 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10041 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10042 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10043 ioa_cfg->vectors_info[vec_idx].
10044 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
/* Request IRQs for vectors 1..nvectors-1; vector 0 is requested by the
 * caller (ipr_probe_ioa). Each vector's dev_id cookie is its HRRQ. */
10048 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10049 struct pci_dev *pdev)
10053 for (i = 1; i < ioa_cfg->nvectors; i++) {
10054 rc = request_irq(pci_irq_vector(pdev, i),
10057 ioa_cfg->vectors_info[i].desc,
10058 &ioa_cfg->hrrq[i]);
/* On failure, free the vectors already requested by this function
 * (the loop bound is elided in this listing — confirm in full source)
 * and return the error. */
10061 free_irq(pci_irq_vector(pdev, i),
10062 &ioa_cfg->hrrq[i]);
10070 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10071 * @pdev: PCI device struct
10073 * Description: Simply set the msi_received flag to 1 indicating that
10074 * Message Signaled Interrupts are supported.
10077 * 0 on success / non-zero on failure
10079 static irqreturn_t ipr_test_intr(int irq, void *devp)
10081 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10082 unsigned long lock_flags = 0;
10083 irqreturn_t rc = IRQ_HANDLED;
10085 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
10086 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10088 ioa_cfg->msi_received = 1;
10089 wake_up(&ioa_cfg->msi_wait_q);
10091 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10096 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10097 * @pdev: PCI device struct
10099 * Description: This routine sets up and initiates a test interrupt to determine
10100 * if the interrupt is received via the ipr_test_intr() service routine.
10101 * If the tests fails, the driver will fall back to LSI.
10104 * 0 on success / non-zero on failure
10106 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10109 volatile u32 int_reg;
10110 unsigned long lock_flags = 0;
10111 int irq = pci_irq_vector(pdev, 0);
/* Arm the test: clear state, mask everything but the debug-ack bit. */
10115 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10116 init_waitqueue_head(&ioa_cfg->msi_wait_q);
10117 ioa_cfg->msi_received = 0;
10118 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10119 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
/* Read back to flush the posted write before dropping the lock. */
10120 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10121 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10123 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10125 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
10127 } else if (ipr_debug)
10128 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
/* Fire the debug-acknowledge interrupt and wait up to 1s (HZ) for
 * ipr_test_intr() to set msi_received. */
10130 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10131 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10132 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10133 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10134 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10136 if (!ioa_cfg->msi_received) {
10137 /* MSI test failed */
10138 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
10140 } else if (ipr_debug)
10141 dev_info(&pdev->dev, "MSI test succeeded.\n");
10143 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Release the temporary test handler before returning. */
10145 free_irq(irq, ioa_cfg);
10152 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
10153 * @pdev: PCI device struct
10154 * @dev_id: PCI device id struct
10157 * 0 on success / non-zero on failure
10159 static int ipr_probe_ioa(struct pci_dev *pdev,
10160 const struct pci_device_id *dev_id)
10162 struct ipr_ioa_cfg *ioa_cfg;
10163 struct Scsi_Host *host;
10164 unsigned long ipr_regs_pci;
10165 void __iomem *ipr_regs;
10166 int rc = PCIBIOS_SUCCESSFUL;
10167 volatile u32 mask, uproc, interrupts;
10168 unsigned long lock_flags, driver_lock_flags;
10169 unsigned int irq_flag;
/* The ioa_cfg is embedded in the Scsi_Host's hostdata allocation. */
10173 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10174 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10177 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10182 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10183 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10184 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10186 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10188 if (!ioa_cfg->ipr_chip) {
10189 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10190 dev_id->vendor, dev_id->device);
10191 goto out_scsi_host_put;
10194 /* set SIS 32 or SIS 64 */
10195 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10196 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10197 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10198 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
/* Module parameter overrides; otherwise per-device default timeout. */
10200 if (ipr_transop_timeout)
10201 ioa_cfg->transop_timeout = ipr_transop_timeout;
10202 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10203 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10205 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10207 ioa_cfg->revid = pdev->revision;
10209 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
/* Claim and map BAR 0, which holds the adapter registers. */
10211 ipr_regs_pci = pci_resource_start(pdev, 0);
10213 rc = pci_request_regions(pdev, IPR_NAME);
10215 dev_err(&pdev->dev,
10216 "Couldn't register memory range of registers\n");
10217 goto out_scsi_host_put;
/* Enable the device, retrying once if EEH recovery is in flight. */
10220 rc = pci_enable_device(pdev);
10222 if (rc || pci_channel_offline(pdev)) {
10223 if (pci_channel_offline(pdev)) {
10224 ipr_wait_for_pci_err_recovery(ioa_cfg);
10225 rc = pci_enable_device(pdev);
10229 dev_err(&pdev->dev, "Cannot enable adapter\n");
10230 ipr_wait_for_pci_err_recovery(ioa_cfg);
10231 goto out_release_regions;
10235 ipr_regs = pci_ioremap_bar(pdev, 0);
10238 dev_err(&pdev->dev,
10239 "Couldn't map memory range of registers\n");
10244 ioa_cfg->hdw_dma_regs = ipr_regs;
10245 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10246 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10248 ipr_init_regs(ioa_cfg);
/* SIS64 prefers a 64-bit DMA mask but can fall back to 32-bit. */
10250 if (ioa_cfg->sis64) {
10251 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10253 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10254 rc = dma_set_mask_and_coherent(&pdev->dev,
10258 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10261 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10262 goto cleanup_nomem;
10265 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10266 ioa_cfg->chip_cfg->cache_line_size);
10268 if (rc != PCIBIOS_SUCCESSFUL) {
10269 dev_err(&pdev->dev, "Write of cache line size failed\n");
10270 ipr_wait_for_pci_err_recovery(ioa_cfg);
10272 goto cleanup_nomem;
10275 /* Issue MMIO read to ensure card is not in EEH */
10276 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10277 ipr_wait_for_pci_err_recovery(ioa_cfg);
/* Clamp the requested MSI-X vector count to the supported maximum. */
10279 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10280 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10281 IPR_MAX_MSIX_VECTORS);
10282 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
/* Allocate interrupt vectors: MSI-X/MSI when the chip supports them,
 * with legacy INTx as the fallback. */
10285 irq_flag = PCI_IRQ_LEGACY;
10286 if (ioa_cfg->ipr_chip->has_msi)
10287 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10288 rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10290 ipr_wait_for_pci_err_recovery(ioa_cfg);
10291 goto cleanup_nomem;
10293 ioa_cfg->nvectors = rc;
10295 if (!pdev->msi_enabled && !pdev->msix_enabled)
10296 ioa_cfg->clear_isr = 1;
10298 pci_set_master(pdev);
/* Re-issue bus mastering if an EEH event raced with the first attempt. */
10300 if (pci_channel_offline(pdev)) {
10301 ipr_wait_for_pci_err_recovery(ioa_cfg);
10302 pci_set_master(pdev);
10303 if (pci_channel_offline(pdev)) {
10305 goto out_msi_disable;
/* Verify MSI delivery actually works; fall back to a single legacy
 * vector if the test interrupt never arrives. */
10309 if (pdev->msi_enabled || pdev->msix_enabled) {
10310 rc = ipr_test_msi(ioa_cfg, pdev);
10313 dev_info(&pdev->dev,
10314 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10315 pdev->msix_enabled ? "-X" : "");
10318 ipr_wait_for_pci_err_recovery(ioa_cfg);
10319 pci_free_irq_vectors(pdev);
10321 ioa_cfg->nvectors = 1;
10322 ioa_cfg->clear_isr = 1;
10325 goto out_msi_disable;
10329 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10330 (unsigned int)num_online_cpus(),
10331 (unsigned int)IPR_MAX_HRRQ_NUM);
10333 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10334 goto out_msi_disable;
10336 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10337 goto out_msi_disable;
10339 rc = ipr_alloc_mem(ioa_cfg);
10341 dev_err(&pdev->dev,
10342 "Couldn't allocate enough memory for device driver!\n");
10343 goto out_msi_disable;
10346 /* Save away PCI config space for use following IOA reset */
10347 rc = pci_save_state(pdev);
10349 if (rc != PCIBIOS_SUCCESSFUL) {
10350 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10352 goto cleanup_nolog;
10356 * If HRRQ updated interrupt is not masked, or reset alert is set,
10357 * the card is in an unknown state and needs a hard reset
10359 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10360 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10361 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10362 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10363 ioa_cfg->needs_hard_reset = 1;
10364 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10365 ioa_cfg->needs_hard_reset = 1;
10366 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10367 ioa_cfg->ioa_unit_checked = 1;
10369 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10370 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10371 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Install the real interrupt handlers: one per MSI/MSI-X vector, or a
 * single shared legacy IRQ. */
10373 if (pdev->msi_enabled || pdev->msix_enabled) {
10374 name_msi_vectors(ioa_cfg);
10375 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10376 ioa_cfg->vectors_info[0].desc,
10377 &ioa_cfg->hrrq[0]);
10379 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10381 rc = request_irq(pdev->irq, ipr_isr,
10383 IPR_NAME, &ioa_cfg->hrrq[0]);
10386 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10388 goto cleanup_nolog;
/* Some chips need a PCI warm reset instead of BIST; those also get a
 * dedicated reset workqueue. */
10391 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10392 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10393 ioa_cfg->needs_warm_reset = 1;
10394 ioa_cfg->reset = ipr_reset_slot_reset;
10396 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10397 WQ_MEM_RECLAIM, host->host_no);
10399 if (!ioa_cfg->reset_work_q) {
10400 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10405 ioa_cfg->reset = ipr_reset_start_bist;
/* Publish the adapter on the global driver list. */
10407 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10408 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10409 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
/* Error unwind: release resources in reverse order of acquisition. */
10416 ipr_free_irqs(ioa_cfg);
10418 ipr_free_mem(ioa_cfg);
10420 ipr_wait_for_pci_err_recovery(ioa_cfg);
10421 pci_free_irq_vectors(pdev);
10425 pci_disable_device(pdev);
10426 out_release_regions:
10427 pci_release_regions(pdev);
10429 scsi_host_put(host);
10434 * ipr_initiate_ioa_bringdown - Bring down an adapter
10435 * @ioa_cfg: ioa config struct
10436 * @shutdown_type: shutdown type
10438 * Description: This function will initiate bringing down the adapter.
10439 * This consists of issuing an IOA shutdown to the adapter
10440 * to flush the cache, and running BIST.
10441 * If the caller needs to wait on the completion of the reset,
10442 * the caller must sleep on the reset_wait_q.
10447 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10448 enum ipr_shutdown_type shutdown_type)
10451 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10452 ioa_cfg->sdt_state = ABORT_DUMP;
10453 ioa_cfg->reset_retries = 0;
10454 ioa_cfg->in_ioa_bringdown = 1;
10455 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10460 * __ipr_remove - Remove a single adapter
10461 * @pdev: pci device struct
10463 * Adapter hot plug remove entry point.
10468 static void __ipr_remove(struct pci_dev *pdev)
10470 unsigned long host_lock_flags = 0;
10471 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10473 unsigned long driver_lock_flags;
/* Wait out any in-flight reset before starting bringdown. */
10476 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10477 while (ioa_cfg->in_reset_reload) {
10478 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10479 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10480 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Mark every HRRQ so no new commands are accepted during removal. */
10483 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10484 spin_lock(&ioa_cfg->hrrq[i]._lock);
10485 ioa_cfg->hrrq[i].removing_ioa = 1;
10486 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10489 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
/* Wait for bringdown to finish and drain all work items. */
10491 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10492 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10493 flush_work(&ioa_cfg->work_q);
10494 if (ioa_cfg->reset_work_q)
10495 flush_workqueue(ioa_cfg->reset_work_q);
10496 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10497 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Unlink this adapter from the global driver list. */
10499 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10500 list_del(&ioa_cfg->queue);
10501 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10503 if (ioa_cfg->sdt_state == ABORT_DUMP)
10504 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10505 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10507 ipr_free_all_resources(ioa_cfg);
10513 * ipr_remove - IOA hot plug remove entry point
10514 * @pdev: pci device struct
10516 * Adapter hot plug remove entry point.
10521 static void ipr_remove(struct pci_dev *pdev)
10523 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10527 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10529 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10531 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10532 &ipr_ioa_async_err_log);
10533 scsi_remove_host(ioa_cfg->host);
10535 __ipr_remove(pdev);
10541 * ipr_probe - Adapter hot plug add entry point
10544 * 0 on success / non-zero on failure
10546 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10548 struct ipr_ioa_cfg *ioa_cfg;
10549 unsigned long flags;
/* Stage 1: allocate memory, map registers, request IRQs. */
10552 rc = ipr_probe_ioa(pdev, dev_id);
10557 ioa_cfg = pci_get_drvdata(pdev);
/* Stage 2: reset the adapter until it can accept commands. */
10558 rc = ipr_probe_ioa_part2(ioa_cfg);
10561 __ipr_remove(pdev);
10565 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10568 __ipr_remove(pdev);
/* Create sysfs files; each failure unwinds everything created before
 * it (the attr arguments/returns are elided in this listing). */
10572 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10576 scsi_remove_host(ioa_cfg->host);
10577 __ipr_remove(pdev);
10581 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10582 &ipr_ioa_async_err_log);
10585 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10587 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10589 scsi_remove_host(ioa_cfg->host);
10590 __ipr_remove(pdev);
10594 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10598 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10599 &ipr_ioa_async_err_log);
10600 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10602 scsi_remove_host(ioa_cfg->host);
10603 __ipr_remove(pdev);
/* Enable device scanning and kick the worker thread. */
10606 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10607 ioa_cfg->scan_enabled = 1;
10608 schedule_work(&ioa_cfg->work_q);
10609 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
/* Set up irq_poll on the secondary HRRQs for SIS64 multi-vector setups. */
10611 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10613 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10614 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10615 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10616 ioa_cfg->iopoll_weight, ipr_iopoll);
10620 scsi_scan_host(ioa_cfg->host);
10626 * ipr_shutdown - Shutdown handler.
10627 * @pdev: pci device struct
10629 * This function is invoked upon system shutdown/reboot. It will issue
10630 * an adapter shutdown to the adapter to flush the write cache.
/*
 * NOTE(review): this extract elides interior lines (the `int i;`
 * declaration, braces, ENTER/LEAVE); comments describe only the
 * visible statements.
 */
10635 static void ipr_shutdown(struct pci_dev *pdev)
10637 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10638 unsigned long lock_flags = 0;
10639 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10642 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Disable iopoll on all secondary HRRQs before bringing the IOA down. */
10643 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10644 ioa_cfg->iopoll_weight = 0;
10645 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10646 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
/* Wait for any in-flight reset/reload to finish (drop lock while sleeping). */
10649 while (ioa_cfg->in_reset_reload) {
10650 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10651 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10652 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Fast reboot on sis64: a lighter quiesce instead of a full normal shutdown. */
10655 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10656 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10658 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10659 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Block until the bringdown completes. */
10660 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* For the fast-reboot quiesce path, also release IRQs and the PCI device. */
10661 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10662 ipr_free_irqs(ioa_cfg);
10663 pci_disable_device(ioa_cfg->pdev);
/*
 * PCI vendor/device/subsystem IDs of all supported adapters.
 * Entry layout: vendor, device, subvendor, subdevice, class,
 * class_mask, driver_data — where driver_data carries per-card
 * quirk flags (IPR_USE_LONG_TRANSOP_TIMEOUT, IPR_USE_PCI_WARM_RESET).
 */
10667 static struct pci_device_id ipr_pci_table[] = {
10668 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10669 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10670 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10671 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10672 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10673 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10674 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10675 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10676 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10677 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10678 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10679 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10680 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10681 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10682 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10683 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10684 IPR_USE_LONG_TRANSOP_TIMEOUT },
10685 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10686 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10687 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10688 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10689 IPR_USE_LONG_TRANSOP_TIMEOUT },
10690 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10691 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10692 IPR_USE_LONG_TRANSOP_TIMEOUT },
10693 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10694 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10695 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10696 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10697 IPR_USE_LONG_TRANSOP_TIMEOUT},
10698 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10699 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10700 IPR_USE_LONG_TRANSOP_TIMEOUT },
10701 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10702 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10703 IPR_USE_LONG_TRANSOP_TIMEOUT },
10704 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10705 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10706 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10707 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10708 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10709 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10710 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10711 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10712 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10713 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10714 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10715 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10716 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10717 IPR_USE_LONG_TRANSOP_TIMEOUT },
10718 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10719 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10720 IPR_USE_LONG_TRANSOP_TIMEOUT },
10721 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10722 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10723 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10724 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10725 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10726 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10727 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10728 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10729 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10730 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10731 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10732 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10733 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10734 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10735 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10736 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10737 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10738 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10739 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10740 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10741 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10742 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10743 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10744 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10745 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10746 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10747 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10748 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10749 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10750 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10751 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10752 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10753 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10754 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10755 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10756 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10757 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10758 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10759 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10760 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10761 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10762 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10763 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10764 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10765 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10766 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10767 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10768 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10769 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10770 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10771 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10772 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10773 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10774 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10775 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10776 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
/* Export the table for module autoloading via modalias. */
10779 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/*
 * PCI error (EEH/AER) recovery callbacks; invoked by the PCI core
 * when a bus error is detected on the adapter's slot.
 */
10781 static const struct pci_error_handlers ipr_err_handler = {
10782 .error_detected = ipr_pci_error_detected,
10783 .mmio_enabled = ipr_pci_mmio_enabled,
10784 .slot_reset = ipr_pci_slot_reset,
/*
 * PCI driver registration structure tying the ID table to the
 * probe/remove/shutdown entry points and the EEH error handlers.
 * (The `.name` initializer is elided from this extract.)
 */
10787 static struct pci_driver ipr_driver = {
10789 .id_table = ipr_pci_table,
10790 .probe = ipr_probe,
10791 .remove = ipr_remove,
10792 .shutdown = ipr_shutdown,
10793 .err_handler = &ipr_err_handler,
10797 * ipr_halt_done - Shutdown prepare completion
/*
 * Completion callback for the shutdown-prepare command issued by
 * ipr_halt(): simply returns the command block to its HRRQ free list.
 */
10802 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10804 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10808 * ipr_halt - Issue shutdown prepare to all adapters
/*
 * Reboot-notifier callback: on restart/halt/power-off, sends an
 * IOA SHUTDOWN (prepare-for-normal) command to every registered
 * adapter so write caches are flushed before the system goes down.
 *
 * Return value:
 */
10811 * NOTIFY_OK on success / NOTIFY_DONE on failure
10813 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10815 struct ipr_cmnd *ipr_cmd;
10816 struct ipr_ioa_cfg *ioa_cfg;
10817 unsigned long flags = 0, driver_lock_flags;
/* Only act on events that actually take the system down. */
10819 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10820 return NOTIFY_DONE;
10822 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
/* Walk every adapter the driver currently manages. */
10824 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
/*
 * Skip adapters that cannot accept commands, and skip sis64
 * adapters on a fast reboot (ipr_shutdown handles those).
 */
10825 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10826 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10827 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10828 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
/* Build and fire the IOA shutdown-prepare command. */
10832 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10833 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10834 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10835 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10836 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10838 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10839 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10841 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
/* Reboot notifier registered in ipr_init(); initializer elided in this extract. */
10846 static struct notifier_block ipr_notifier = {
10851 * ipr_init - Module entry point
/*
 * Registers the reboot notifier and the PCI driver. The notifier is
 * unregistered again if PCI registration fails, so no callback is
 * left dangling.
 *
 * Return value:
 */
10854 * 0 on success / negative value on failure
10856 static int __init ipr_init(void)
10860 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10861 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10863 register_reboot_notifier(&ipr_notifier);
10864 rc = pci_register_driver(&ipr_driver);
/* Undo the notifier registration on pci_register_driver() failure. */
10866 unregister_reboot_notifier(&ipr_notifier);
10874 * ipr_exit - Module unload
10876 * Module unload entry point.
/* Tear down in reverse order of ipr_init(): notifier first, then PCI driver. */
10881 static void __exit ipr_exit(void)
10883 unregister_reboot_notifier(&ipr_notifier);
10884 pci_unregister_driver(&ipr_driver);
/* Wire the module load/unload entry points. */
10887 module_init(ipr_init);
10888 module_exit(ipr_exit);