2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
/*
 * Global driver state. The tunables below are exposed as module
 * parameters; the MODULE_PARM_DESC() declarations further down are the
 * authoritative documentation for each one.
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;	/* index into ipr_max_bus_speeds[]; 1 = U160 */
static int ipr_testmode = 0;	/* DANGEROUS: allows unsupported configurations */
static unsigned int ipr_fastfail = 0;	/* reduce timeouts and retries */
static unsigned int ipr_transop_timeout = 0;	/* seconds to wait for adapter operational */
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;	/* dual-adapter RAID support */
static unsigned int ipr_number_of_msix = 16;	/* MSI-X vectors to use (1-16) */
static unsigned int ipr_fast_reboot;	/* skip adapter shutdown during reboot */
/* NOTE(review): presumably guards ipr_ioa_head — confirm at the use sites */
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
/*
 * Each entry gives the PCI cache line size and the MMIO offsets of the
 * interrupt/mailbox registers for one chip family; entries are selected
 * per PCI ID via the ipr_chip[] table below.
 */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
.cache_line_size = 0x20,
.set_interrupt_mask_reg = 0x0022C,
.clr_interrupt_mask_reg = 0x00230,
.clr_interrupt_mask_reg32 = 0x00230,
.sense_interrupt_mask_reg = 0x0022C,
.sense_interrupt_mask_reg32 = 0x0022C,
.clr_interrupt_reg = 0x00228,
.clr_interrupt_reg32 = 0x00228,
.sense_interrupt_reg = 0x00224,
.sense_interrupt_reg32 = 0x00224,
.ioarrin_reg = 0x00404,
.sense_uproc_interrupt_reg = 0x00214,
.sense_uproc_interrupt_reg32 = 0x00214,
.set_uproc_interrupt_reg = 0x00214,
.set_uproc_interrupt_reg32 = 0x00214,
.clr_uproc_interrupt_reg = 0x00218,
.clr_uproc_interrupt_reg32 = 0x00218
{ /* Snipe and Scamp */
.cache_line_size = 0x20,
.set_interrupt_mask_reg = 0x00288,
.clr_interrupt_mask_reg = 0x0028C,
.clr_interrupt_mask_reg32 = 0x0028C,
.sense_interrupt_mask_reg = 0x00288,
.sense_interrupt_mask_reg32 = 0x00288,
.clr_interrupt_reg = 0x00284,
.clr_interrupt_reg32 = 0x00284,
.sense_interrupt_reg = 0x00280,
.sense_interrupt_reg32 = 0x00280,
.ioarrin_reg = 0x00504,
.sense_uproc_interrupt_reg = 0x00290,
.sense_uproc_interrupt_reg32 = 0x00290,
.set_uproc_interrupt_reg = 0x00290,
.set_uproc_interrupt_reg32 = 0x00290,
.clr_uproc_interrupt_reg = 0x00294,
.clr_uproc_interrupt_reg32 = 0x00294
/* NOTE(review): presumably the SIS-64 (CRoC-family) register set — it is
 * the only entry with init feedback, dump and endian-swap registers, and
 * ipr_chip[] points its SIS64 rows at ipr_chip_cfg[2]; confirm. */
.cache_line_size = 0x20,
.set_interrupt_mask_reg = 0x00010,
.clr_interrupt_mask_reg = 0x00018,
.clr_interrupt_mask_reg32 = 0x0001C,
.sense_interrupt_mask_reg = 0x00010,
.sense_interrupt_mask_reg32 = 0x00014,
.clr_interrupt_reg = 0x00008,
.clr_interrupt_reg32 = 0x0000C,
.sense_interrupt_reg = 0x00000,
.sense_interrupt_reg32 = 0x00004,
.ioarrin_reg = 0x00070,
.sense_uproc_interrupt_reg = 0x00020,
.sense_uproc_interrupt_reg32 = 0x00024,
.set_uproc_interrupt_reg = 0x00020,
.set_uproc_interrupt_reg32 = 0x00024,
.clr_uproc_interrupt_reg = 0x00028,
.clr_uproc_interrupt_reg32 = 0x0002C,
.init_feedback_reg = 0x0005C,
.dump_addr_reg = 0x00064,
.dump_data_reg = 0x00068,
.endian_swap_reg = 0x00084
/*
 * Maps each supported PCI vendor/device ID to its chip description:
 * { vendor, device, <bool — presumably MSI capability; confirm against
 * struct ipr_chip_t in ipr.h>, SIS interface level (32/64-bit),
 * config access method (PCI config space vs MMIO), register map }.
 */
static const struct ipr_chip_t ipr_chip[] = {
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
/* SCSI bus rate limits, indexed by the max_speed module parameter (0-2) */
static int ipr_max_bus_speeds[] = {
IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
/*
 * Module metadata and parameter registration. Parameters with permission
 * 0 are load-time only; those with S_IRUGO | S_IWUSR are also writable
 * at runtime through sysfs.
 */
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
"[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
231 /* A constant array of IOASCs/URCs/Error Messages */
233 struct ipr_error_table_t ipr_error_table[] = {
234 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
235 "8155: An unknown error was received"},
237 "Soft underlength error"},
239 "Command to be cancelled not found"},
241 "Qualified success"},
242 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
243 "FFFE: Soft device bus error recovered by the IOA"},
244 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
245 "4101: Soft device bus fabric error"},
246 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
247 "FFFC: Logical block guard error recovered by the device"},
248 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
249 "FFFC: Logical block reference tag error recovered by the device"},
250 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
251 "4171: Recovered scatter list tag / sequence number error"},
252 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
253 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
254 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
255 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
256 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
257 "FFFD: Recovered logical block reference tag error detected by the IOA"},
258 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
259 "FFFD: Logical block guard error recovered by the IOA"},
260 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
261 "FFF9: Device sector reassign successful"},
262 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
263 "FFF7: Media error recovered by device rewrite procedures"},
264 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
265 "7001: IOA sector reassignment successful"},
266 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
267 "FFF9: Soft media error. Sector reassignment recommended"},
268 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
269 "FFF7: Media error recovered by IOA rewrite procedures"},
270 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
271 "FF3D: Soft PCI bus error recovered by the IOA"},
272 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
273 "FFF6: Device hardware error recovered by the IOA"},
274 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
275 "FFF6: Device hardware error recovered by the device"},
276 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
277 "FF3D: Soft IOA error recovered by the IOA"},
278 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
279 "FFFA: Undefined device response recovered by the IOA"},
280 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
281 "FFF6: Device bus error, message or command phase"},
282 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
283 "FFFE: Task Management Function failed"},
284 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
285 "FFF6: Failure prediction threshold exceeded"},
286 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
287 "8009: Impending cache battery pack failure"},
289 "Logical Unit in process of becoming ready"},
291 "Initializing command required"},
293 "34FF: Disk device format in progress"},
295 "Logical unit not accessible, target port in unavailable state"},
296 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
297 "9070: IOA requested reset"},
299 "Synchronization required"},
301 "IOA microcode download required"},
303 "Device bus connection is prohibited by host"},
305 "No ready, IOA shutdown"},
307 "Not ready, IOA has been shutdown"},
308 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
309 "3020: Storage subsystem configuration error"},
311 "FFF5: Medium error, data unreadable, recommend reassign"},
313 "7000: Medium error, data unreadable, do not reassign"},
314 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
315 "FFF3: Disk media format bad"},
316 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
317 "3002: Addressed device failed to respond to selection"},
318 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
319 "3100: Device bus error"},
320 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
321 "3109: IOA timed out a device command"},
323 "3120: SCSI bus is not operational"},
324 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
325 "4100: Hard device bus fabric error"},
326 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
327 "310C: Logical block guard error detected by the device"},
328 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
329 "310C: Logical block reference tag error detected by the device"},
330 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
331 "4170: Scatter list tag / sequence number error"},
332 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
333 "8150: Logical block CRC error on IOA to Host transfer"},
334 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
335 "4170: Logical block sequence number error on IOA to Host transfer"},
336 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
337 "310D: Logical block reference tag error detected by the IOA"},
338 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
339 "310D: Logical block guard error detected by the IOA"},
340 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
341 "9000: IOA reserved area data check"},
342 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
343 "9001: IOA reserved area invalid data pattern"},
344 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
345 "9002: IOA reserved area LRC error"},
346 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
347 "Hardware Error, IOA metadata access error"},
348 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
349 "102E: Out of alternate sectors for disk storage"},
350 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
351 "FFF4: Data transfer underlength error"},
352 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
353 "FFF4: Data transfer overlength error"},
354 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
355 "3400: Logical unit failure"},
356 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
357 "FFF4: Device microcode is corrupt"},
358 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
359 "8150: PCI bus error"},
361 "Unsupported device bus message received"},
362 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
363 "FFF4: Disk device problem"},
364 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
365 "8150: Permanent IOA failure"},
366 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
367 "3010: Disk device returned wrong response to IOA"},
368 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
369 "8151: IOA microcode error"},
371 "Device bus status error"},
372 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
373 "8157: IOA error requiring IOA reset to recover"},
375 "ATA device status error"},
377 "Message reject received from the device"},
378 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
379 "8008: A permanent cache battery pack failure occurred"},
380 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
381 "9090: Disk unit has been modified after the last known status"},
382 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
383 "9081: IOA detected device error"},
384 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
385 "9082: IOA detected device error"},
386 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
387 "3110: Device bus error, message or command phase"},
388 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
389 "3110: SAS Command / Task Management Function failed"},
390 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
391 "9091: Incorrect hardware configuration change has been detected"},
392 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
393 "9073: Invalid multi-adapter configuration"},
394 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
395 "4010: Incorrect connection between cascaded expanders"},
396 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
397 "4020: Connections exceed IOA design limits"},
398 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
399 "4030: Incorrect multipath connection"},
400 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
401 "4110: Unsupported enclosure function"},
402 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
403 "4120: SAS cable VPD cannot be read"},
404 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
405 "FFF4: Command to logical unit failed"},
407 "Illegal request, invalid request type or request packet"},
409 "Illegal request, invalid resource handle"},
411 "Illegal request, commands not allowed to this device"},
413 "Illegal request, command not allowed to a secondary adapter"},
415 "Illegal request, command not allowed to a non-optimized resource"},
417 "Illegal request, invalid field in parameter list"},
419 "Illegal request, parameter not supported"},
421 "Illegal request, parameter value invalid"},
423 "Illegal request, command sequence error"},
425 "Illegal request, dual adapter support not enabled"},
427 "Illegal request, another cable connector was physically disabled"},
429 "Illegal request, inconsistent group id/group count"},
430 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
431 "9031: Array protection temporarily suspended, protection resuming"},
432 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
433 "9040: Array protection temporarily suspended, protection resuming"},
434 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
435 "4080: IOA exceeded maximum operating temperature"},
436 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
437 "4085: Service required"},
438 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
439 "3140: Device bus not ready to ready transition"},
440 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
441 "FFFB: SCSI bus was reset"},
443 "FFFE: SCSI bus transition to single ended"},
445 "FFFE: SCSI bus transition to LVD"},
446 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
447 "FFFB: SCSI bus was reset by another initiator"},
448 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
449 "3029: A device replacement has occurred"},
450 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
451 "4102: Device bus fabric performance degradation"},
452 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
453 "9051: IOA cache data exists for a missing or failed device"},
454 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
455 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
456 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
457 "9025: Disk unit is not supported at its physical location"},
458 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
459 "3020: IOA detected a SCSI bus configuration error"},
460 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
461 "3150: SCSI bus configuration error"},
462 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
463 "9074: Asymmetric advanced function disk configuration"},
464 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
465 "4040: Incomplete multipath connection between IOA and enclosure"},
466 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
467 "4041: Incomplete multipath connection between enclosure and device"},
468 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
469 "9075: Incomplete multipath connection between IOA and remote IOA"},
470 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
471 "9076: Configuration error, missing remote IOA"},
472 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
473 "4050: Enclosure does not support a required multipath function"},
474 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
475 "4121: Configuration error, required cable is missing"},
476 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
477 "4122: Cable is not plugged into the correct location on remote IOA"},
478 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
479 "4123: Configuration error, invalid cable vital product data"},
480 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
481 "4124: Configuration error, both cable ends are plugged into the same IOA"},
482 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
483 "4070: Logically bad block written on device"},
484 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
485 "9041: Array protection temporarily suspended"},
486 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
487 "9042: Corrupt array parity detected on specified device"},
488 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
489 "9030: Array no longer protected due to missing or failed disk unit"},
490 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
491 "9071: Link operational transition"},
492 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
493 "9072: Link not operational transition"},
494 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
495 "9032: Array exposed but still protected"},
496 {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
497 "70DD: Device forced failed by disrupt device command"},
498 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
499 "4061: Multipath redundancy level got better"},
500 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
501 "4060: Multipath redundancy level got worse"},
502 {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
503 "9083: Device raw mode enabled"},
504 {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
505 "9084: Device raw mode disabled"},
507 "Failure due to other device"},
508 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
509 "9008: IOA does not support functions expected by devices"},
510 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
511 "9010: Cache data associated with attached devices cannot be found"},
512 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
513 "9011: Cache data belongs to devices other than those attached"},
514 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
515 "9020: Array missing 2 or more devices with only 1 device present"},
516 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
517 "9021: Array missing 2 or more devices with 2 or more devices present"},
518 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
519 "9022: Exposed array is missing a required device"},
520 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
521 "9023: Array member(s) not at required physical locations"},
522 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
523 "9024: Array not functional due to present hardware configuration"},
524 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
525 "9026: Array not functional due to present hardware configuration"},
526 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
527 "9027: Array is missing a device and parity is out of sync"},
528 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
529 "9028: Maximum number of arrays already exist"},
530 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
531 "9050: Required cache data cannot be located for a disk unit"},
532 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
533 "9052: Cache data exists for a device that has been modified"},
534 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
535 "9054: IOA resources not available due to previous problems"},
536 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
537 "9092: Disk unit requires initialization before use"},
538 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
539 "9029: Incorrect hardware configuration change has been detected"},
540 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
541 "9060: One or more disk pairs are missing from an array"},
542 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
543 "9061: One or more disks are missing from an array"},
544 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
545 "9062: One or more disks are missing from an array"},
546 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
547 "9063: Maximum number of functional arrays has been exceeded"},
549 "Data protect, other volume set problem"},
551 "Aborted command, invalid descriptor"},
553 "Target operating conditions have changed, dual adapter takeover"},
555 "Aborted command, medium removal prevented"},
557 "Command terminated by host"},
559 "Aborted command, command terminated by host"}
/*
 * Known SCSI enclosure services (SES) devices and the maximum bus speed
 * (in MB/s) each supports. The second field is a per-byte compare mask
 * against the device's product ID; 'X' and '*' appear to mark wildcard /
 * special-compare positions — confirm against the lookup routine that
 * walks this table.
 */
static const struct ipr_ses_table_entry ipr_ses_table[] = {
{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
579 * Function Prototypes
581 static int ipr_reset_alert(struct ipr_cmnd *);
582 static void ipr_process_ccn(struct ipr_cmnd *);
583 static void ipr_process_error(struct ipr_cmnd *);
584 static void ipr_reset_ioa_job(struct ipr_cmnd *);
585 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
586 enum ipr_shutdown_type);
588 #ifdef CONFIG_SCSI_IPR_TRACE
590 * ipr_trc_hook - Add a trace entry to the driver trace
591 * @ipr_cmd: ipr command struct
593 * @add_data: additional data
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
u8 type, u32 add_data)
struct ipr_trace_entry *trace_entry;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
unsigned int trace_index;
/* Lock-free circular trace buffer: the atomic post-increment plus mask
 * wraps the index, so concurrent callers each claim a distinct slot. */
trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
trace_entry = &ioa_cfg->trace[trace_index];
trace_entry->time = jiffies;
trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
trace_entry->type = type;
/* The ATA command byte is stored in different structures on SIS-64
 * versus legacy adapters. */
if (ipr_cmd->ioa_cfg->sis64)
trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
trace_entry->u.add_data = add_data;
620 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
624 * ipr_lock_and_done - Acquire lock and complete command
625 * @ipr_cmd: ipr command struct
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
unsigned long lock_flags;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Invoke the command's done handler with the SCSI host lock held;
 * installed as the fast_done callback by ipr_get_free_ipr_cmnd(). */
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_cmd->done(ipr_cmd);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
641 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
642 * @ipr_cmd: ipr command struct
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
dma_addr_t dma_addr = ipr_cmd->dma_addr;
/* Preserve the HRRQ routing id across the command-packet memset so the
 * reused command still completes on the same response queue. */
hrrq_id = ioarcb->cmd_pkt.hrrq_id;
memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
ioarcb->cmd_pkt.hrrq_id = hrrq_id;
ioarcb->data_transfer_length = 0;
ioarcb->read_data_transfer_length = 0;
ioarcb->ioadl_len = 0;
ioarcb->read_ioadl_len = 0;
/* Point the IOARCB back at this command's own IOADL; descriptor format
 * and address width differ between SIS-64 and legacy adapters. */
if (ipr_cmd->ioa_cfg->sis64) {
ioarcb->u.sis64_addr_data.data_ioadl_addr =
cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
ioasa64->u.gata.status = 0;
ioarcb->write_ioadl_addr =
cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
ioasa->u.gata.status = 0;
/* Clear completion status from the previous use of this block */
ioasa->hdr.ioasc = 0;
ioasa->hdr.residual_data_len = 0;
ipr_cmd->scsi_cmd = NULL;
ipr_cmd->sense_buffer[0] = 0;
ipr_cmd->dma_use_sg = 0;
683 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
684 * @ipr_cmd: ipr command struct
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
void (*fast_done) (struct ipr_cmnd *))
ipr_reinit_ipr_cmnd(ipr_cmd);
ipr_cmd->u.scratch = 0;
ipr_cmd->sibling = NULL;
ipr_cmd->eh_comp = NULL;
/* fast_done: completion callback; ipr_lock_and_done() is the variant
 * that acquires the SCSI host lock before running ->done(). */
ipr_cmd->fast_done = fast_done;
/* NOTE(review): init_timer() is the legacy timer API (pre timer_setup());
 * the timer function/data are presumably filled in per request later. */
init_timer(&ipr_cmd->timer);
701 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
702 * @ioa_cfg: ioa config struct
705 * pointer to ipr command struct
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
struct ipr_cmnd *ipr_cmd = NULL;
/* Pop the first command block off this HRRQ's free list; ipr_cmd stays
 * NULL when the free queue is empty. */
if (likely(!list_empty(&hrrq->hrrq_free_q))) {
ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
struct ipr_cmnd, queue);
list_del(&ipr_cmd->queue);
723 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
724 * @ioa_cfg: ioa config struct
727 * pointer to ipr command struct
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
/* Allocate from the initialization HRRQ and install the lock-taking
 * done wrapper for serialized completion. */
struct ipr_cmnd *ipr_cmd =
__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
739 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
740 * @ioa_cfg: ioa config struct
741 * @clr_ints: interrupts to clear
743 * This function masks all interrupts on the adapter, then clears the
744 * interrupts specified in the mask
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
volatile u32 int_reg;
/* Stop new interrupts */
/* Each HRRQ's allow_interrupts flag is cleared under its own _lock so
 * in-flight interrupt handlers observe a consistent value. */
for (i = 0; i < ioa_cfg->hrrq_num; i++) {
spin_lock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg->hrrq[i].allow_interrupts = 0;
spin_unlock(&ioa_cfg->hrrq[i]._lock);
/* Set interrupt mask to stop all new interrupts */
/* NOTE(review): 64-bit vs 32-bit mask write — presumably selected by
 * ioa_cfg->sis64; confirm. */
writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
/* Clear any pending interrupts */
writel(~0, ioa_cfg->regs.clr_interrupt_reg);
writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
/* Read back — presumably to flush the posted MMIO writes; confirm */
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
777 * ipr_save_pcix_cmd_reg - Save PCI-X command register
778 * @ioa_cfg: ioa config struct
781 * 0 on success / -EIO on failure
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
/* No PCI-X capability on this device: nothing to save */
if (pcix_cmd_reg == 0)
if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
&ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
/* Force data parity error recovery and relaxed ordering on in the
 * saved value, so they are enabled when the register is restored. */
ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
801 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
802 * @ioa_cfg: ioa config struct
805 * 0 on success / -EIO on failure
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
/* Restore the value captured by ipr_save_pcix_cmd_reg() */
if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
823 * __ipr_sata_eh_done - done function for aborted SATA commands
824 * @ipr_cmd: ipr command struct
826 * This function is invoked for ops generated to SATA
827 * devices which are being aborted.
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
struct ata_queued_cmd *qc = ipr_cmd->qc;
struct ipr_sata_port *sata_port = qc->ap->private_data;
/* Fail the qc and mark the emulated ATA status register busy */
qc->err_mask |= AC_ERR_OTHER;
sata_port->ioasa.status |= ATA_BUSY;
/* Wake any error-handler thread waiting on this command, then return
 * the command block to its HRRQ free list. */
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
846 * ipr_sata_eh_done - done function for aborted SATA commands
847 * @ipr_cmd: ipr command struct
849 * This function is invoked for ops generated to SATA
850 * devices which are being aborted.
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
unsigned long hrrq_flags;
/* Locked wrapper: run __ipr_sata_eh_done() under the HRRQ lock */
spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
__ipr_sata_eh_done(ipr_cmd);
spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
866 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
867 * @ipr_cmd: ipr command struct
869 * This function is invoked by the interrupt handler for
870 * ops generated by the SCSI mid-layer which are being aborted.
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
/* Report the aborted op to the midlayer as a host error */
scsi_cmd->result |= (DID_ERROR << 16);
scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
/* Wake any error-handler thread waiting on this command, then return
 * the command block to its HRRQ free list. */
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
889 * ipr_scsi_eh_done - mid-layer done function for aborted ops
890 * @ipr_cmd: ipr command struct
892 * This function is invoked by the interrupt handler for
893 * ops generated by the SCSI mid-layer which are being aborted.
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
unsigned long hrrq_flags;
struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
/* Locked wrapper: run __ipr_scsi_eh_done() under the HRRQ lock */
spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
__ipr_scsi_eh_done(ipr_cmd);
spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
909 * ipr_fail_all_ops - Fails all outstanding ops.
910 * @ioa_cfg: ioa config struct
912 * This function fails all outstanding ops.
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
struct ipr_cmnd *ipr_cmd, *temp;
struct ipr_hrr_queue *hrrq;
/* Walk every HRRQ and complete each pending command with an
 * "IOA was reset" status stamped by the driver. */
for_each_hrrq(hrrq, ioa_cfg) {
spin_lock(&hrrq->_lock);
list_for_each_entry_safe(ipr_cmd,
temp, &hrrq->hrrq_pending_q, queue) {
list_del(&ipr_cmd->queue);
ipr_cmd->s.ioasa.hdr.ioasc =
cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
ipr_cmd->s.ioasa.hdr.ilid =
cpu_to_be32(IPR_DRIVER_ILID);
/* Use the unlocked eh_done helpers: the HRRQ lock is already
 * held here, so the locking wrappers would deadlock. */
if (ipr_cmd->scsi_cmd)
ipr_cmd->done = __ipr_scsi_eh_done;
else if (ipr_cmd->qc)
ipr_cmd->done = __ipr_sata_eh_done;
ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
IPR_IOASC_IOA_WAS_RESET);
/* Cancel the per-command timeout before completing */
del_timer(&ipr_cmd->timer);
ipr_cmd->done(ipr_cmd);
spin_unlock(&hrrq->_lock);
/* Hand a prepared command to the adapter by writing its IOARCB DMA
 * address to the IOARRIN register (64-bit write on SIS64, 32-bit
 * otherwise). On SIS64 the low-order address bits encode the IOARCB
 * size the adapter should fetch. */
950 * ipr_send_command - Send driver initiated requests.
951 * @ipr_cmd: ipr command struct
953 * This function sends a command to the adapter using the correct write call.
954 * In the case of sis64, calculate the ioarcb size required. Then or in the
960 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
962 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
963 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
965 if (ioa_cfg->sis64) {
966 /* The default size is 256 bytes */
967 send_dma_addr |= 0x1;
969 /* If the number of ioadls * size of ioadl > 128 bytes,
970 then use a 512 byte ioarcb */
971 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
972 send_dma_addr |= 0x4;
973 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
/* Non-SIS64 adapters take a plain 32-bit IOARCB address. */
975 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
/* Queue a driver-initiated command: install the done callback, arm the
 * timeout timer, trace the start, and write the command to the adapter. */
979 * ipr_do_req - Send driver initiated requests.
980 * @ipr_cmd: ipr command struct
981 * @done: done function
982 * @timeout_func: timeout function
983 * @timeout: timeout value
985 * This function sends the specified command to the adapter with the
986 * timeout given. The done function is invoked on command completion.
991 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
992 void (*done) (struct ipr_cmnd *),
993 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
/* Track the command on its HRR queue's pending list until completion. */
995 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
997 ipr_cmd->done = done;
/* Legacy timer API: the command pointer is smuggled through timer.data
 * and the timeout function is cast to the timer callback signature. */
999 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
1000 ipr_cmd->timer.expires = jiffies + timeout;
1001 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
1003 add_timer(&ipr_cmd->timer);
1005 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
1007 ipr_send_command(ipr_cmd);
/* Done callback used for internally generated blocking ops: clears the
 * sibling link (if any) and wakes the thread sleeping in
 * ipr_send_blocking_cmd() via the command's completion. */
1011 * ipr_internal_cmd_done - Op done function for an internally generated op.
1012 * @ipr_cmd: ipr command struct
1014 * This function is the op done function for an internally generated,
1015 * blocking op. It simply wakes the sleeping thread.
1020 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
1022 if (ipr_cmd->sibling)
1023 ipr_cmd->sibling = NULL;
1025 complete(&ipr_cmd->completion);
/* Build a single-element IOADL (I/O address descriptor list) for a
 * one-segment transfer, filling either the 64-bit (SIS64) or 32-bit
 * descriptor and the matching length fields in the IOARCB. */
1029 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1030 * @ipr_cmd: ipr command struct
1031 * @dma_addr: dma address
1032 * @len: transfer length
1033 * @flags: ioadl flag value
1035 * This function initializes an ioadl in the case where there is only a single
1041 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
1044 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1045 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
/* Exactly one scatter/gather entry. */
1047 ipr_cmd->dma_use_sg = 1;
1049 if (ipr_cmd->ioa_cfg->sis64) {
1050 ioadl64->flags = cpu_to_be32(flags);
1051 ioadl64->data_len = cpu_to_be32(len);
1052 ioadl64->address = cpu_to_be64(dma_addr);
1054 ipr_cmd->ioarcb.ioadl_len =
1055 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1056 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
/* 32-bit IOADL packs flags and length into one big-endian word. */
1058 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1059 ioadl->address = cpu_to_be32(dma_addr);
/* Reads and writes use separate length fields in the 32-bit IOARCB. */
1061 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1062 ipr_cmd->ioarcb.read_ioadl_len =
1063 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1064 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1066 ipr_cmd->ioarcb.ioadl_len =
1067 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1068 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
/* Issue a command and sleep until it completes. The Scsi_Host lock is
 * dropped around the wait (caller must hold it on entry) and re-taken
 * before returning; completion is signalled by ipr_internal_cmd_done(). */
1074 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1075 * @ipr_cmd: ipr command struct
1076 * @timeout_func: function to invoke if command times out
1082 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1083 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1086 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1088 init_completion(&ipr_cmd->completion);
1089 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
/* Cannot sleep holding the host lock; release it across the wait. */
1091 spin_unlock_irq(ioa_cfg->host->host_lock);
1092 wait_for_completion(&ipr_cmd->completion);
1093 spin_lock_irq(ioa_cfg->host->host_lock);
/* Pick an HRR queue index for a new command. With a single queue there
 * is nothing to balance; otherwise round-robin over queues 1..hrrq_num-1
 * via an atomic counter (index 0 is excluded by the modulo-plus-one). */
1096 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1100 if (ioa_cfg->hrrq_num == 1)
1103 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1104 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
/* Issue a Host Controlled Async (HCAM) command to the adapter, or park
 * the hostrcb on the free queue if commands are currently not allowed
 * (e.g. during reset). The done handler is chosen by HCAM type. */
1110 * ipr_send_hcam - Send an HCAM to the adapter.
1111 * @ioa_cfg: ioa config struct
1113 * @hostrcb: hostrcb struct
1115 * This function will send a Host Controlled Async command to the adapter.
1116 * If HCAMs are currently not allowed to be issued to the adapter, it will
1117 * place the hostrcb on the free queue.
1122 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1123 struct ipr_hostrcb *hostrcb)
1125 struct ipr_cmnd *ipr_cmd;
1126 struct ipr_ioarcb *ioarcb;
/* Gate on the primary (INIT) HRR queue's allow_cmds flag. */
1128 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1129 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1130 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1131 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1133 ipr_cmd->u.hostrcb = hostrcb;
1134 ioarcb = &ipr_cmd->ioarcb;
/* Build the HCAM CDB: opcode, subtype, and the hostrcb buffer
 * length in big-endian order across cdb[7]/cdb[8]. */
1136 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1137 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1138 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1139 ioarcb->cmd_pkt.cdb[1] = type;
1140 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1141 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1143 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1144 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
/* Config-change notifications and error logs complete differently. */
1146 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1147 ipr_cmd->done = ipr_process_ccn;
1149 ipr_cmd->done = ipr_process_error;
1151 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1153 ipr_send_command(ipr_cmd);
/* Adapter not accepting commands: keep the hostrcb for later. */
1155 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
/* Map a config-table bus protocol value onto the libata device class
 * stored in the resource entry (ATA, ATAPI, or unknown). */
1160 * ipr_update_ata_class - Update the ata class in the resource entry
1161 * @res: resource entry struct
1162 * @proto: cfgte device bus protocol value
1167 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1170 case IPR_PROTO_SATA:
1171 case IPR_PROTO_SAS_STP:
1172 res->ata_class = ATA_DEV_ATA;
1174 case IPR_PROTO_SATA_ATAPI:
1175 case IPR_PROTO_SAS_STP_ATAPI:
1176 res->ata_class = ATA_DEV_ATAPI;
/* Any other protocol: not an ATA device. */
1179 res->ata_class = ATA_DEV_UNKNOWN;
/* Populate a freshly allocated resource entry from a config table entry.
 * SIS64 adapters use 64-bit config entries with virtual buses and
 * driver-assigned target IDs tracked in per-bus bitmaps; older adapters
 * take bus/target/lun directly from the entry. */
1185 * ipr_init_res_entry - Initialize a resource entry struct.
1186 * @res: resource entry struct
1187 * @cfgtew: config table entry wrapper struct
1192 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1193 struct ipr_config_table_entry_wrapper *cfgtew)
1197 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1198 struct ipr_resource_entry *gscsi_res = NULL;
/* Reset per-device state flags for the new entry. */
1200 res->needs_sync_complete = 0;
1203 res->del_from_ml = 0;
1204 res->resetting_device = 0;
1205 res->reset_occurred = 0;
1207 res->sata_port = NULL;
1209 if (ioa_cfg->sis64) {
1210 proto = cfgtew->u.cfgte64->proto;
1211 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1212 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1213 res->qmodel = IPR_QUEUEING_MODEL64(res);
1214 res->type = cfgtew->u.cfgte64->res_type;
1216 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1217 sizeof(res->res_path));
1220 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1221 sizeof(res->dev_lun.scsi_lun));
1222 res->lun = scsilun_to_int(&res->dev_lun);
/* Generic SCSI: reuse the target ID of an existing resource with
 * the same dev_id (another LUN of the same device), else allocate
 * the next free ID from the target_ids bitmap. */
1224 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1225 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1226 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1228 res->target = gscsi_res->target;
1233 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1234 ioa_cfg->max_devs_supported);
1235 set_bit(res->target, ioa_cfg->target_ids);
1237 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1238 res->bus = IPR_IOAFP_VIRTUAL_BUS;
/* Arrays and volume sets live on their own virtual buses, with
 * IDs drawn from dedicated bitmaps. */
1240 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1241 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1242 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1243 ioa_cfg->max_devs_supported);
1244 set_bit(res->target, ioa_cfg->array_ids);
1245 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1246 res->bus = IPR_VSET_VIRTUAL_BUS;
1247 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1248 ioa_cfg->max_devs_supported);
1249 set_bit(res->target, ioa_cfg->vset_ids);
1251 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1252 ioa_cfg->max_devs_supported);
1253 set_bit(res->target, ioa_cfg->target_ids);
/* Non-SIS64: addressing comes straight from the config entry. */
1256 proto = cfgtew->u.cfgte->proto;
1257 res->qmodel = IPR_QUEUEING_MODEL(res);
1258 res->flags = cfgtew->u.cfgte->flags;
1259 if (res->flags & IPR_IS_IOA_RESOURCE)
1260 res->type = IPR_RES_TYPE_IOAFP;
1262 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1264 res->bus = cfgtew->u.cfgte->res_addr.bus;
1265 res->target = cfgtew->u.cfgte->res_addr.target;
1266 res->lun = cfgtew->u.cfgte->res_addr.lun;
1267 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1270 ipr_update_ata_class(res, proto);
/* Compare a resource entry against a config table entry. SIS64 matches
 * on dev_id + LUN bytes; older adapters match on bus/target/lun. */
1274 * ipr_is_same_device - Determine if two devices are the same.
1275 * @res: resource entry struct
1276 * @cfgtew: config table entry wrapper struct
1279 * 1 if the devices are the same / 0 otherwise
1281 static int ipr_is_same_device(struct ipr_resource_entry *res,
1282 struct ipr_config_table_entry_wrapper *cfgtew)
1284 if (res->ioa_cfg->sis64) {
1285 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1286 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1287 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1288 sizeof(cfgtew->u.cfgte64->lun))) {
1292 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1293 res->target == cfgtew->u.cfgte->res_addr.target &&
1294 res->lun == cfgtew->u.cfgte->res_addr.lun)
/* Format a resource path as "XX-XX-...", stopping at the 0xff terminator
 * or when the "-XX" triplets would exceed the caller's buffer. The
 * snprintf size argument (buffer + len - p) is the space remaining at p. */
1302 * __ipr_format_res_path - Format the resource path for printing.
1303 * @res_path: resource path
1305 * @len: length of buffer provided
1310 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1316 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1317 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1318 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
/* Prefix the resource path with the SCSI host number ("host/XX-XX-...")
 * and delegate the path body to __ipr_format_res_path(). */
1324 * ipr_format_res_path - Format the resource path for printing.
1325 * @ioa_cfg: ioa config struct
1326 * @res_path: resource path
1328 * @len: length of buffer provided
1333 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1334 u8 *res_path, char *buffer, int len)
1339 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
/* NOTE(review): after the snprintf above p > buffer, so (buffer - p) is
 * negative and "len - (buffer - p)" is LARGER than len, overstating the
 * remaining space. This looks like it should be len - (p - buffer) —
 * confirm against upstream before relying on the bound. */
1340 __ipr_format_res_path(res_path, p, len - (buffer - p));
/* Refresh an existing resource entry from a (possibly changed) config
 * table entry. On SIS64 a changed resource path is logged to the sdev. */
1345 * ipr_update_res_entry - Update the resource entry.
1346 * @res: resource entry struct
1347 * @cfgtew: config table entry wrapper struct
1352 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1353 struct ipr_config_table_entry_wrapper *cfgtew)
1355 char buffer[IPR_MAX_RES_PATH_LENGTH];
1359 if (res->ioa_cfg->sis64) {
1360 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1361 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1362 res->type = cfgtew->u.cfgte64->res_type;
1364 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1365 sizeof(struct ipr_std_inq_data));
1367 res->qmodel = IPR_QUEUEING_MODEL64(res);
1368 proto = cfgtew->u.cfgte64->proto;
1369 res->res_handle = cfgtew->u.cfgte64->res_handle;
1370 res->dev_id = cfgtew->u.cfgte64->dev_id;
1372 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1373 sizeof(res->dev_lun.scsi_lun));
/* Detect a changed resource path so it can be reported below. */
1375 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1376 sizeof(res->res_path))) {
1377 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1378 sizeof(res->res_path));
1382 if (res->sdev && new_path)
1383 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1384 ipr_format_res_path(res->ioa_cfg,
1385 res->res_path, buffer, sizeof(buffer)));
/* Non-SIS64 refresh mirrors ipr_init_res_entry()'s legacy branch. */
1387 res->flags = cfgtew->u.cfgte->flags;
1388 if (res->flags & IPR_IS_IOA_RESOURCE)
1389 res->type = IPR_RES_TYPE_IOAFP;
1391 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1393 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1394 sizeof(struct ipr_std_inq_data));
1396 res->qmodel = IPR_QUEUEING_MODEL(res);
1397 proto = cfgtew->u.cfgte->proto;
1398 res->res_handle = cfgtew->u.cfgte->res_handle;
1401 ipr_update_ata_class(res, proto);
/* Release the driver-assigned target ID of a departing SIS64 resource,
 * clearing it from the bitmap that matches its virtual bus. For a
 * generic SCSI device the ID is shared across LUNs with the same dev_id,
 * so it is only released when no other resource still uses it. No-op on
 * non-SIS64 adapters (they do not assign target IDs). */
1405 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1407 * @res: resource entry struct
1408 * @cfgtew: config table entry wrapper struct
1413 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1415 struct ipr_resource_entry *gscsi_res = NULL;
1416 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1418 if (!ioa_cfg->sis64)
1421 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1422 clear_bit(res->target, ioa_cfg->array_ids);
1423 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1424 clear_bit(res->target, ioa_cfg->vset_ids);
1425 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1426 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1427 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1429 clear_bit(res->target, ioa_cfg->target_ids);
1431 } else if (res->bus == 0)
1432 clear_bit(res->target, ioa_cfg->target_ids);
/* Process a configuration-change hostrcb: find (or allocate) the
 * resource entry named by the notification's res_handle, update or
 * remove it, kick the worker thread when the mid-layer view must change,
 * and finally re-arm the CCN HCAM with the same hostrcb. */
1436 * ipr_handle_config_change - Handle a config change from the adapter
1437 * @ioa_cfg: ioa config struct
1443 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1444 struct ipr_hostrcb *hostrcb)
1446 struct ipr_resource_entry *res = NULL;
1447 struct ipr_config_table_entry_wrapper cfgtew;
1448 __be32 cc_res_handle;
/* Pick the 64- or 32-bit config entry embedded in the notification. */
1452 if (ioa_cfg->sis64) {
1453 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1454 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1456 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1457 cc_res_handle = cfgtew.u.cfgte->res_handle;
/* Look for an existing resource with this handle. */
1460 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1461 if (res->res_handle == cc_res_handle) {
/* Not found: take a free entry, or just re-arm the HCAM if none left. */
1468 if (list_empty(&ioa_cfg->free_res_q)) {
1469 ipr_send_hcam(ioa_cfg,
1470 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1475 res = list_entry(ioa_cfg->free_res_q.next,
1476 struct ipr_resource_entry, queue);
1478 list_del(&res->queue);
1479 ipr_init_res_entry(res, &cfgtew);
1480 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1483 ipr_update_res_entry(res, &cfgtew);
/* Removal: either hand off to the worker to delete the sdev, or
 * reclaim the entry (and its target ID) immediately. */
1485 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1487 res->del_from_ml = 1;
1488 res->res_handle = IPR_INVALID_RES_HANDLE;
1489 schedule_work(&ioa_cfg->work_q);
1491 ipr_clear_res_target(res);
1492 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1494 } else if (!res->sdev || res->del_from_ml) {
1496 schedule_work(&ioa_cfg->work_q);
/* Re-issue the CCN HCAM so further changes keep arriving. */
1499 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
/* Done handler for a config-change-notification HCAM: free the command,
 * and either log the failure and re-arm the HCAM (on error IOASC) or
 * process the change via ipr_handle_config_change(). Reset/abort IOASCs
 * are expected during adapter reset and are not reported. */
1503 * ipr_process_ccn - Op done function for a CCN.
1504 * @ipr_cmd: ipr command struct
1506 * This function is the op done function for a configuration
1507 * change notification host controlled async from the adapter.
1512 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1514 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1515 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1516 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1518 list_del_init(&hostrcb->queue);
1519 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1522 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1523 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1524 dev_err(&ioa_cfg->pdev->dev,
1525 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1527 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1529 ipr_handle_config_change(ioa_cfg, hostrcb);
/* Trim trailing spaces from buf (of size i) and NUL-terminate it.
 * Scans backwards from the end while the character is a space. */
1534 * strip_whitespace - Strip and pad trailing whitespace.
1535 * @i: size of buffer
1536 * @buf: string to modify
1538 * This function will strip all trailing whitespace and
1539 * NUL terminate the string.
1542 static void strip_whitespace(int i, char *buf)
1547 while (i && buf[i] == ' ')
/* Log vendor ID, product ID, and serial number on one line, with
 * trailing whitespace stripped from each field. Field buffers are one
 * byte longer than the raw fields to hold the NUL terminator. */
1553 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1554 * @prefix: string to print at start of printk
1555 * @hostrcb: hostrcb pointer
1556 * @vpd: vendor/product id/sn struct
1561 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1562 struct ipr_vpd *vpd)
1564 char vendor_id[IPR_VENDOR_ID_LEN + 1];
1565 char product_id[IPR_PROD_ID_LEN + 1];
1566 char sn[IPR_SERIAL_NUM_LEN + 1];
1568 memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1569 strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
1571 memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
1572 strip_whitespace(IPR_PROD_ID_LEN, product_id);
1574 memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
1575 strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
1577 ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
1578 vendor_id, product_id, sn);
/* Log VPD in the verbose two-line form: vendor+product concatenated in
 * one stack buffer (reused afterwards for the serial number), each
 * NUL-terminated in place before printing. */
1582 * ipr_log_vpd - Log the passed VPD to the error log.
1583 * @vpd: vendor/product id/sn struct
1588 static void ipr_log_vpd(struct ipr_vpd *vpd)
1590 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1591 + IPR_SERIAL_NUM_LEN];
1593 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1594 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1596 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1597 ipr_err("Vendor/Product ID: %s\n", buffer);
1599 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1600 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1601 ipr_err(" Serial Number: %s\n", buffer);
/* Compact VPD logging plus the device's 64-bit WWN (printed as two
 * big-endian 32-bit words). */
1605 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1606 * @prefix: string to print at start of printk
1607 * @hostrcb: hostrcb pointer
1608 * @vpd: vendor/product id/sn/wwn struct
1613 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1614 struct ipr_ext_vpd *vpd)
1616 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1617 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1618 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
/* Verbose VPD logging plus the 64-bit WWN, mirroring
 * ipr_log_ext_vpd_compact() in the multi-line format. */
1622 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1623 * @vpd: vendor/product id/sn/wwn struct
1628 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1630 ipr_log_vpd(&vpd->vpd);
1631 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1632 be32_to_cpu(vpd->wwid[1]));
/* Log a type-12 (enhanced cache) error: current vs. expected cache
 * directory / adapter card pairings plus three words of IOA data. The
 * type-12 payload location differs between SIS64 and legacy hostrcbs. */
1636 * ipr_log_enhanced_cache_error - Log a cache error.
1637 * @ioa_cfg: ioa config struct
1638 * @hostrcb: hostrcb struct
1643 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1644 struct ipr_hostrcb *hostrcb)
1646 struct ipr_hostrcb_type_12_error *error;
1649 error = &hostrcb->hcam.u.error64.u.type_12_error;
1651 error = &hostrcb->hcam.u.error.u.type_12_error;
1653 ipr_err("-----Current Configuration-----\n");
1654 ipr_err("Cache Directory Card Information:\n");
1655 ipr_log_ext_vpd(&error->ioa_vpd);
1656 ipr_err("Adapter Card Information:\n");
1657 ipr_log_ext_vpd(&error->cfc_vpd);
1659 ipr_err("-----Expected Configuration-----\n");
1660 ipr_err("Cache Directory Card Information:\n");
1661 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1662 ipr_err("Adapter Card Information:\n");
1663 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1665 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1666 be32_to_cpu(error->ioa_data[0]),
1667 be32_to_cpu(error->ioa_data[1]),
1668 be32_to_cpu(error->ioa_data[2]));
/* Log a type-02 (legacy cache) error — same layout of output as the
 * enhanced variant above but with the non-extended (no-WWN) VPD form. */
1672 * ipr_log_cache_error - Log a cache error.
1673 * @ioa_cfg: ioa config struct
1674 * @hostrcb: hostrcb struct
1679 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1680 struct ipr_hostrcb *hostrcb)
1682 struct ipr_hostrcb_type_02_error *error =
1683 &hostrcb->hcam.u.error.u.type_02_error;
1685 ipr_err("-----Current Configuration-----\n");
1686 ipr_err("Cache Directory Card Information:\n");
1687 ipr_log_vpd(&error->ioa_vpd);
1688 ipr_err("Adapter Card Information:\n");
1689 ipr_log_vpd(&error->cfc_vpd);
1691 ipr_err("-----Expected Configuration-----\n");
1692 ipr_err("Cache Directory Card Information:\n");
1693 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1694 ipr_err("Adapter Card Information:\n");
1695 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1697 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1698 be32_to_cpu(error->ioa_data[0]),
1699 be32_to_cpu(error->ioa_data[1]),
1700 be32_to_cpu(error->ioa_data[2]));
/* Log a type-13 (enhanced configuration) error: one extended-VPD record
 * per logged device, including old/new device info and the last cache
 * directory / adapter cards seen with the device. */
1704 * ipr_log_enhanced_config_error - Log a configuration error.
1705 * @ioa_cfg: ioa config struct
1706 * @hostrcb: hostrcb struct
1711 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1712 struct ipr_hostrcb *hostrcb)
1714 int errors_logged, i;
1715 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1716 struct ipr_hostrcb_type_13_error *error;
1718 error = &hostrcb->hcam.u.error.u.type_13_error;
1719 errors_logged = be32_to_cpu(error->errors_logged);
1721 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1722 be32_to_cpu(error->errors_detected), errors_logged);
1724 dev_entry = error->dev;
/* One entry per logged error; errors_logged comes from the adapter. */
1726 for (i = 0; i < errors_logged; i++, dev_entry++) {
1729 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1730 ipr_log_ext_vpd(&dev_entry->vpd);
1732 ipr_err("-----New Device Information-----\n");
1733 ipr_log_ext_vpd(&dev_entry->new_vpd);
1735 ipr_err("Cache Directory Card Information:\n");
1736 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1738 ipr_err("Adapter Card Information:\n");
1739 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
/* Log a SIS64 type-23 configuration error — like the type-13 variant
 * but devices are identified by formatted resource path rather than a
 * physical resource address. */
1744 * ipr_log_sis64_config_error - Log a device error.
1745 * @ioa_cfg: ioa config struct
1746 * @hostrcb: hostrcb struct
1751 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1752 struct ipr_hostrcb *hostrcb)
1754 int errors_logged, i;
1755 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1756 struct ipr_hostrcb_type_23_error *error;
1757 char buffer[IPR_MAX_RES_PATH_LENGTH];
1759 error = &hostrcb->hcam.u.error64.u.type_23_error;
1760 errors_logged = be32_to_cpu(error->errors_logged);
1762 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1763 be32_to_cpu(error->errors_detected), errors_logged);
1765 dev_entry = error->dev;
1767 for (i = 0; i < errors_logged; i++, dev_entry++) {
1770 ipr_err("Device %d : %s", i + 1,
1771 __ipr_format_res_path(dev_entry->res_path,
1772 buffer, sizeof(buffer)));
1773 ipr_log_ext_vpd(&dev_entry->vpd);
1775 ipr_err("-----New Device Information-----\n");
1776 ipr_log_ext_vpd(&dev_entry->new_vpd);
1778 ipr_err("Cache Directory Card Information:\n");
1779 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1781 ipr_err("Adapter Card Information:\n");
1782 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
/* Log a legacy type-03 configuration error: per-device non-extended VPD
 * records plus five words of per-device IOA data. */
1787 * ipr_log_config_error - Log a configuration error.
1788 * @ioa_cfg: ioa config struct
1789 * @hostrcb: hostrcb struct
1794 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1795 struct ipr_hostrcb *hostrcb)
1797 int errors_logged, i;
1798 struct ipr_hostrcb_device_data_entry *dev_entry;
1799 struct ipr_hostrcb_type_03_error *error;
1801 error = &hostrcb->hcam.u.error.u.type_03_error;
1802 errors_logged = be32_to_cpu(error->errors_logged);
1804 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1805 be32_to_cpu(error->errors_detected), errors_logged);
1807 dev_entry = error->dev;
1809 for (i = 0; i < errors_logged; i++, dev_entry++) {
1812 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1813 ipr_log_vpd(&dev_entry->vpd);
1815 ipr_err("-----New Device Information-----\n");
1816 ipr_log_vpd(&dev_entry->new_vpd);
1818 ipr_err("Cache Directory Card Information:\n");
1819 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1821 ipr_err("Adapter Card Information:\n");
1822 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1824 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1825 be32_to_cpu(dev_entry->ioa_data[0]),
1826 be32_to_cpu(dev_entry->ioa_data[1]),
1827 be32_to_cpu(dev_entry->ioa_data[2]),
1828 be32_to_cpu(dev_entry->ioa_data[3]),
1829 be32_to_cpu(dev_entry->ioa_data[4]));
/* Log a type-14 (enhanced array) error: the array's RAID level and
 * last-functional address, then each member's VPD plus current and
 * expected physical locations. Members whose serial number is all '0'
 * are unpopulated slots and are skipped. */
1834 * ipr_log_enhanced_array_error - Log an array configuration error.
1835 * @ioa_cfg: ioa config struct
1836 * @hostrcb: hostrcb struct
1841 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1842 struct ipr_hostrcb *hostrcb)
1845 struct ipr_hostrcb_type_14_error *error;
1846 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1847 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1849 error = &hostrcb->hcam.u.error.u.type_14_error;
1853 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1854 error->protection_level,
1855 ioa_cfg->host->host_no,
1856 error->last_func_vset_res_addr.bus,
1857 error->last_func_vset_res_addr.target,
1858 error->last_func_vset_res_addr.lun);
1862 array_entry = error->array_member;
/* Clamp the adapter-reported count to the actual array bound. */
1863 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1864 ARRAY_SIZE(error->array_member));
1866 for (i = 0; i < num_entries; i++, array_entry++) {
1867 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1870 if (be32_to_cpu(error->exposed_mode_adn) == i)
1871 ipr_err("Exposed Array Member %d:\n", i);
1873 ipr_err("Array Member %d:\n", i);
1875 ipr_log_ext_vpd(&array_entry->vpd);
1876 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1877 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1878 "Expected Location");
/* Log a legacy type-04 array error. Iterates a fixed 18 member slots
 * (hard-coded, unlike the enhanced variant which clamps to the array
 * size), skipping all-'0' serial numbers, and at some point switches to
 * the error->array_member2 block — the condition for that switch is on
 * a line not visible in this listing. */
1885 * ipr_log_array_error - Log an array configuration error.
1886 * @ioa_cfg: ioa config struct
1887 * @hostrcb: hostrcb struct
1892 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1893 struct ipr_hostrcb *hostrcb)
1896 struct ipr_hostrcb_type_04_error *error;
1897 struct ipr_hostrcb_array_data_entry *array_entry;
1898 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1900 error = &hostrcb->hcam.u.error.u.type_04_error;
1904 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1905 error->protection_level,
1906 ioa_cfg->host->host_no,
1907 error->last_func_vset_res_addr.bus,
1908 error->last_func_vset_res_addr.target,
1909 error->last_func_vset_res_addr.lun);
1913 array_entry = error->array_member;
1915 for (i = 0; i < 18; i++) {
1916 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1919 if (be32_to_cpu(error->exposed_mode_adn) == i)
1920 ipr_err("Exposed Array Member %d:\n", i);
1922 ipr_err("Array Member %d:\n", i);
1924 ipr_log_vpd(&array_entry->vpd);
1926 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1927 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1928 "Expected Location");
1933 array_entry = error->array_member2;
/* Hex-dump IOA error data, four big-endian 32-bit words per line with a
 * byte-offset prefix. At the default log level the dump is capped at
 * IPR_DEFAULT_MAX_ERROR_DUMP bytes. @len is in bytes; the loop index is
 * in words, hence the len/4 bound and the i*4 offset label. */
1940 * ipr_log_hex_data - Log additional hex IOA error data.
1941 * @ioa_cfg: ioa config struct
1942 * @data: IOA error data
1948 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1955 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1956 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1958 for (i = 0; i < len / 4; i += 4) {
1959 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1960 be32_to_cpu(data[i]),
1961 be32_to_cpu(data[i+1]),
1962 be32_to_cpu(data[i+2]),
1963 be32_to_cpu(data[i+3]));
/* Log a type-17 (enhanced dual-adapter) error: the failure-reason text
 * (forcibly NUL-terminated and trimmed in place), the remote IOA's VPD,
 * and the remaining payload as a hex dump sized from the hostrcb length
 * minus the headers preceding the data field. */
1968 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1969 * @ioa_cfg: ioa config struct
1970 * @hostrcb: hostrcb struct
1975 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1976 struct ipr_hostrcb *hostrcb)
1978 struct ipr_hostrcb_type_17_error *error;
1981 error = &hostrcb->hcam.u.error64.u.type_17_error;
1983 error = &hostrcb->hcam.u.error.u.type_17_error;
/* The adapter-supplied string may not be terminated; make it safe. */
1985 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1986 strim(error->failure_reason);
1988 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1989 be32_to_cpu(hostrcb->hcam.u.error.prc));
1990 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1991 ipr_log_hex_data(ioa_cfg, error->data,
1992 be32_to_cpu(hostrcb->hcam.length) -
1993 (offsetof(struct ipr_hostrcb_error, u) +
1994 offsetof(struct ipr_hostrcb_type_17_error, data)));
/* Log a legacy type-07 dual-adapter error — same shape as the type-17
 * handler above, but with non-extended VPD and the type-07 layout. */
1998 * ipr_log_dual_ioa_error - Log a dual adapter error.
1999 * @ioa_cfg: ioa config struct
2000 * @hostrcb: hostrcb struct
2005 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2006 struct ipr_hostrcb *hostrcb)
2008 struct ipr_hostrcb_type_07_error *error;
2010 error = &hostrcb->hcam.u.error.u.type_07_error;
/* Force NUL termination and trim before printing. */
2011 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2012 strim(error->failure_reason);
2014 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2015 be32_to_cpu(hostrcb->hcam.u.error.prc));
2016 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2017 ipr_log_hex_data(ioa_cfg, error->data,
2018 be32_to_cpu(hostrcb->hcam.length) -
2019 (offsetof(struct ipr_hostrcb_error, u) +
2020 offsetof(struct ipr_hostrcb_type_07_error, data)));
/* Lookup table: fabric path "active" field -> human-readable label,
 * used by ipr_log_fabric_path() / ipr_log64_fabric_path(). */
2023 static const struct {
2026 } path_active_desc[] = {
2027 { IPR_PATH_NO_INFO, "Path" },
2028 { IPR_PATH_ACTIVE, "Active path" },
2029 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
/* Lookup table: fabric path "state" field -> description phrase,
 * paired with path_active_desc in the fabric-path loggers. */
2032 static const struct {
2035 } path_state_desc[] = {
2036 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2037 { IPR_PATH_HEALTHY, "is healthy" },
2038 { IPR_PATH_DEGRADED, "is degraded" },
2039 { IPR_PATH_FAILED, "is failed" }
/* Log one fabric path descriptor: match the active and state nibbles of
 * path_state against the description tables, then print IOA port /
 * cascade / phy, omitting fields whose value is the 0xff "not present"
 * sentinel. Falls through to a raw dump if no table entry matches. */
2043 * ipr_log_fabric_path - Log a fabric path error
2044 * @hostrcb: hostrcb struct
2045 * @fabric: fabric descriptor
2050 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2051 struct ipr_hostrcb_fabric_desc *fabric)
2054 u8 path_state = fabric->path_state;
2055 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2056 u8 state = path_state & IPR_PATH_STATE_MASK;
2058 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2059 if (path_active_desc[i].active != active)
2062 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2063 if (path_state_desc[j].state != state)
/* 0xff in cascaded_expander/phy means "field not applicable". */
2066 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2067 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2068 path_active_desc[i].desc, path_state_desc[j].desc,
2070 } else if (fabric->cascaded_expander == 0xff) {
2071 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2072 path_active_desc[i].desc, path_state_desc[j].desc,
2073 fabric->ioa_port, fabric->phy);
2074 } else if (fabric->phy == 0xff) {
2075 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2076 path_active_desc[i].desc, path_state_desc[j].desc,
2077 fabric->ioa_port, fabric->cascaded_expander);
2079 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2080 path_active_desc[i].desc, path_state_desc[j].desc,
2081 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
/* No table match: dump the raw path_state and addressing fields. */
2087 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2088 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
/* SIS64 counterpart of ipr_log_fabric_path(): same active/state table
 * matching, but the path is identified by a formatted resource path
 * string instead of port/cascade/phy numbers. */
2092 * ipr_log64_fabric_path - Log a fabric path error
2093 * @hostrcb: hostrcb struct
2094 * @fabric: fabric descriptor
2099 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2100 struct ipr_hostrcb64_fabric_desc *fabric)
2103 u8 path_state = fabric->path_state;
2104 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2105 u8 state = path_state & IPR_PATH_STATE_MASK;
2106 char buffer[IPR_MAX_RES_PATH_LENGTH];
2108 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2109 if (path_active_desc[i].active != active)
2112 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2113 if (path_state_desc[j].state != state)
2116 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2117 path_active_desc[i].desc, path_state_desc[j].desc,
2118 ipr_format_res_path(hostrcb->ioa_cfg,
2120 buffer, sizeof(buffer)));
/* No table match: raw fallback with the formatted resource path. */
2125 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2126 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2127 buffer, sizeof(buffer)));
/* Lookup table: path config element type -> label, used by
 * ipr_log_path_elem() and its SIS64 counterpart. */
2130 static const struct {
2133 } path_type_desc[] = {
2134 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2135 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2136 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2137 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
/* Lookup table: path config element status -> label, paired with
 * path_type_desc in the path-element loggers. */
2140 static const struct {
2143 } path_status_desc[] = {
2144 { IPR_PATH_CFG_NO_PROB, "Functional" },
2145 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2146 { IPR_PATH_CFG_FAILED, "Failed" },
2147 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2148 { IPR_PATH_NOT_DETECTED, "Missing" },
2149 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
/*
 * Printable names for SAS phy link rates, indexed by
 * (cfg->link_rate & IPR_PHY_LINK_RATE_MASK).  Most entries and the
 * closing "};" are elided in this partial listing.
 */
2152 static const char *link_rate[] = {
2155 "phy reset problem",
/*
 * NOTE(review): partial listing — "continue;", "return;", braces and some
 * statements are elided between the numbered lines.  Code kept
 * byte-identical; comments only.
 */
2172 * ipr_log_path_elem - Log a fabric path element.
2173 * @hostrcb: hostrcb struct
2174 * @cfg: fabric path element struct
2179 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2180 struct ipr_hostrcb_config_element *cfg)
2183 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2184 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
/* Nonexistent elements carry no useful data; skip them entirely. */
2186 if (type == IPR_PATH_CFG_NOT_EXIST)
/* Find matching type/status descriptors, then pick a message format
 * based on which of cascade/phy fields are valid (0xff == not present). */
2189 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2190 if (path_type_desc[i].type != type)
2193 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2194 if (path_status_desc[j].status != status)
2197 if (type == IPR_PATH_CFG_IOA_PORT) {
2198 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2199 path_status_desc[j].desc, path_type_desc[i].desc,
2200 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2201 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2203 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2204 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2205 path_status_desc[j].desc, path_type_desc[i].desc,
2206 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2207 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2208 } else if (cfg->cascaded_expander == 0xff) {
2209 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2210 "WWN=%08X%08X\n", path_status_desc[j].desc,
2211 path_type_desc[i].desc, cfg->phy,
2212 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2213 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2214 } else if (cfg->phy == 0xff) {
2215 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2216 "WWN=%08X%08X\n", path_status_desc[j].desc,
2217 path_type_desc[i].desc, cfg->cascaded_expander,
2218 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2221 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2222 "WWN=%08X%08X\n", path_status_desc[j].desc,
2223 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2224 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2225 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/* No descriptor matched: log the raw type_status byte instead. */
2232 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2233 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2234 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2235 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/*
 * NOTE(review): partial listing — "continue;", "return;" and braces are
 * elided between the numbered lines.  Code kept byte-identical;
 * comments only.
 */
2239 * ipr_log64_path_elem - Log a fabric path element.
2240 * @hostrcb: hostrcb struct
2241 * @cfg: fabric path element struct
2246 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2247 struct ipr_hostrcb64_config_element *cfg)
2250 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2251 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2252 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2253 char buffer[IPR_MAX_RES_PATH_LENGTH];
/* Only SIS64 descriptors for existing elements are loggable here. */
2255 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2258 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2259 if (path_type_desc[i].type != type)
2262 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2263 if (path_status_desc[j].status != status)
2266 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2267 path_status_desc[j].desc, path_type_desc[i].desc,
2268 ipr_format_res_path(hostrcb->ioa_cfg,
2269 cfg->res_path, buffer, sizeof(buffer)),
2270 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2271 be32_to_cpu(cfg->wwid[0]),
2272 be32_to_cpu(cfg->wwid[1]));
/* No descriptor matched: log the raw type_status byte instead. */
2276 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2277 "WWN=%08X%08X\n", cfg->type_status,
2278 ipr_format_res_path(hostrcb->ioa_cfg,
2279 cfg->res_path, buffer, sizeof(buffer)),
2280 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2281 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/*
 * NOTE(review): partial listing — some declarations and braces are elided
 * between the numbered lines.  Code kept byte-identical; comments only.
 */
2285 * ipr_log_fabric_error - Log a fabric error.
2286 * @ioa_cfg: ioa config struct
2287 * @hostrcb: hostrcb struct
2292 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2293 struct ipr_hostrcb *hostrcb)
2295 struct ipr_hostrcb_type_20_error *error;
2296 struct ipr_hostrcb_fabric_desc *fabric;
2297 struct ipr_hostrcb_config_element *cfg;
2300 error = &hostrcb->hcam.u.error.u.type_20_error;
/* Force NUL-termination of the adapter-supplied reason string before printing. */
2301 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2302 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
/* add_len = bytes remaining in the HCAM after the fixed header portion. */
2304 add_len = be32_to_cpu(hostrcb->hcam.length) -
2305 (offsetof(struct ipr_hostrcb_error, u) +
2306 offsetof(struct ipr_hostrcb_type_20_error, desc));
/* Walk variable-length fabric descriptors; each is advanced by its own
 * self-described length field. */
2308 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2309 ipr_log_fabric_path(hostrcb, fabric);
2310 for_each_fabric_cfg(fabric, cfg)
2311 ipr_log_path_elem(hostrcb, cfg);
2313 add_len -= be16_to_cpu(fabric->length);
2314 fabric = (struct ipr_hostrcb_fabric_desc *)
2315 ((unsigned long)fabric + be16_to_cpu(fabric->length));
/* Dump whatever trailing bytes were not consumed by the descriptors. */
2318 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
/*
 * NOTE(review): partial listing — declarations, "continue;" and braces are
 * elided between the numbered lines.  Code kept byte-identical;
 * comments only.
 */
2322 * ipr_log_sis64_array_error - Log a sis64 array error.
2323 * @ioa_cfg: ioa config struct
2324 * @hostrcb: hostrcb struct
2329 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2330 struct ipr_hostrcb *hostrcb)
2333 struct ipr_hostrcb_type_24_error *error;
2334 struct ipr_hostrcb64_array_data_entry *array_entry;
2335 char buffer[IPR_MAX_RES_PATH_LENGTH];
/* All-'0' serial number marks an unused/blank array member slot. */
2336 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2338 error = &hostrcb->hcam.u.error64.u.type_24_error;
2342 ipr_err("RAID %s Array Configuration: %s\n",
2343 error->protection_level,
2344 ipr_format_res_path(ioa_cfg, error->last_res_path,
2345 buffer, sizeof(buffer)));
/* Clamp to the array bound so a bogus adapter count cannot overrun. */
2349 array_entry = error->array_member;
2350 num_entries = min_t(u32, error->num_entries,
2351 ARRAY_SIZE(error->array_member));
2353 for (i = 0; i < num_entries; i++, array_entry++) {
2355 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2358 if (error->exposed_mode_adn == i)
2359 ipr_err("Exposed Array Member %d:\n", i);
2361 ipr_err("Array Member %d:\n", i);
2363 ipr_err("Array Member %d:\n", i)
2364 ipr_log_ext_vpd(&array_entry->vpd);
2365 ipr_err("Current Location: %s\n",
2366 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2367 buffer, sizeof(buffer)));
2368 ipr_err("Expected Location: %s\n",
2369 ipr_format_res_path(ioa_cfg,
2370 array_entry->expected_res_path,
2371 buffer, sizeof(buffer)));
/*
 * NOTE(review): partial listing — declarations and braces are elided
 * between the numbered lines.  Code kept byte-identical; comments only.
 * Mirrors ipr_log_fabric_error() using the 64-bit (sis64) structures.
 */
2378 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2379 * @ioa_cfg: ioa config struct
2380 * @hostrcb: hostrcb struct
2385 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2386 struct ipr_hostrcb *hostrcb)
2388 struct ipr_hostrcb_type_30_error *error;
2389 struct ipr_hostrcb64_fabric_desc *fabric;
2390 struct ipr_hostrcb64_config_element *cfg;
2393 error = &hostrcb->hcam.u.error64.u.type_30_error;
/* Force NUL-termination of the adapter-supplied reason string before printing. */
2395 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2396 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2398 add_len = be32_to_cpu(hostrcb->hcam.length) -
2399 (offsetof(struct ipr_hostrcb64_error, u) +
2400 offsetof(struct ipr_hostrcb_type_30_error, desc));
2402 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2403 ipr_log64_fabric_path(hostrcb, fabric);
2404 for_each_fabric_cfg(fabric, cfg)
2405 ipr_log64_path_elem(hostrcb, cfg);
2407 add_len -= be16_to_cpu(fabric->length);
2408 fabric = (struct ipr_hostrcb64_fabric_desc *)
2409 ((unsigned long)fabric + be16_to_cpu(fabric->length));
/* Dump whatever trailing bytes were not consumed by the descriptors. */
2412 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
/*
 * Fallback logger: hex-dumps the raw HCAM payload when no overlay-specific
 * formatter applies.
 */
2416 * ipr_log_generic_error - Log an adapter error.
2417 * @ioa_cfg: ioa config struct
2418 * @hostrcb: hostrcb struct
2423 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2424 struct ipr_hostrcb *hostrcb)
2426 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2427 be32_to_cpu(hostrcb->hcam.length));
/*
 * NOTE(review): kernel-doc below said "Log a cache error" — corrected to
 * match the function, which logs a sis64 failing-device error (overlay 21).
 * Partial listing: braces/declarations elided; code kept byte-identical.
 */
2431 * ipr_log_sis64_device_error - Log a sis64 device error.
2432 * @ioa_cfg: ioa config struct
2433 * @hostrcb: hostrcb struct
2438 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2439 struct ipr_hostrcb *hostrcb)
2441 struct ipr_hostrcb_type_21_error *error;
2442 char buffer[IPR_MAX_RES_PATH_LENGTH];
2444 error = &hostrcb->hcam.u.error64.u.type_21_error;
2446 ipr_err("-----Failing Device Information-----\n");
2447 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2448 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2449 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2450 ipr_err("Device Resource Path: %s\n",
2451 __ipr_format_res_path(error->res_path,
2452 buffer, sizeof(buffer)));
/* Force NUL-termination of adapter-supplied strings before printing. */
2453 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2454 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2455 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2456 ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2457 ipr_err("SCSI Sense Data:\n");
2458 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2459 ipr_err("SCSI Command Descriptor Block: \n");
2460 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2462 ipr_err("Additional IOA Data:\n");
2463 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
/*
 * NOTE(review): partial listing — loop body/return lines are elided.
 * Code kept byte-identical; comments only.
 */
2467 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2470 * This function will return the index of into the ipr_error_table
2471 * for the specified IOASC. If the IOASC is not in the table,
2472 * 0 will be returned, which points to the entry used for unknown errors.
2475 * index into the ipr_error_table
2477 static u32 ipr_get_error(u32 ioasc)
/* Linear scan; the IOASC is masked before comparison. */
2481 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2482 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
/*
 * NOTE(review): partial listing — "return;", "break;", if/else connectors
 * and braces are elided between the numbered lines.  Code kept
 * byte-identical; comments only.
 */
2489 * ipr_handle_log_data - Log an adapter error.
2490 * @ioa_cfg: ioa config struct
2491 * @hostrcb: hostrcb struct
2493 * This function logs an adapter error to the system.
2498 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2499 struct ipr_hostrcb *hostrcb)
2503 struct ipr_hostrcb_type_21_error *error;
/* Only error-log notifications are handled here. */
2505 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2508 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2509 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
/* The failing-device IOASC lives in different unions for sis64 vs. sis32. */
2512 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2514 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2516 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2517 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2518 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2519 scsi_report_bus_reset(ioa_cfg->host,
2520 hostrcb->hcam.u.error.fd_res_addr.bus);
2523 error_index = ipr_get_error(ioasc);
2525 if (!ipr_error_table[error_index].log_hcam)
/* Suppress ILLEGAL REQUEST HW-command failures at default log level. */
2528 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2529 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2530 error = &hostrcb->hcam.u.error64.u.type_21_error;
2532 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2533 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2537 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2539 /* Set indication we have logged an error */
2540 ioa_cfg->errors_logged++;
2542 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
/* Clamp a bogus adapter-reported length to the raw buffer size. */
2544 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2545 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
/* Dispatch to the overlay-specific formatter. */
2547 switch (hostrcb->hcam.overlay_id) {
2548 case IPR_HOST_RCB_OVERLAY_ID_2:
2549 ipr_log_cache_error(ioa_cfg, hostrcb);
2551 case IPR_HOST_RCB_OVERLAY_ID_3:
2552 ipr_log_config_error(ioa_cfg, hostrcb);
2554 case IPR_HOST_RCB_OVERLAY_ID_4:
2555 case IPR_HOST_RCB_OVERLAY_ID_6:
2556 ipr_log_array_error(ioa_cfg, hostrcb);
2558 case IPR_HOST_RCB_OVERLAY_ID_7:
2559 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2561 case IPR_HOST_RCB_OVERLAY_ID_12:
2562 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2564 case IPR_HOST_RCB_OVERLAY_ID_13:
2565 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2567 case IPR_HOST_RCB_OVERLAY_ID_14:
2568 case IPR_HOST_RCB_OVERLAY_ID_16:
2569 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2571 case IPR_HOST_RCB_OVERLAY_ID_17:
2572 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2574 case IPR_HOST_RCB_OVERLAY_ID_20:
2575 ipr_log_fabric_error(ioa_cfg, hostrcb);
2577 case IPR_HOST_RCB_OVERLAY_ID_21:
2578 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2580 case IPR_HOST_RCB_OVERLAY_ID_23:
2581 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2583 case IPR_HOST_RCB_OVERLAY_ID_24:
2584 case IPR_HOST_RCB_OVERLAY_ID_26:
2585 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2587 case IPR_HOST_RCB_OVERLAY_ID_30:
2588 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2590 case IPR_HOST_RCB_OVERLAY_ID_1:
2591 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2593 ipr_log_generic_error(ioa_cfg, hostrcb);
/*
 * ipr_get_free_hostrcb - Get a free host RCB from the free queue,
 * reclaiming one from the report queue if the free queue is empty.
 * (Partial listing: braces and the return statement are elided.)
 */
2598 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2600 struct ipr_hostrcb *hostrcb;
2602 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2603 struct ipr_hostrcb, queue);
2605 if (unlikely(!hostrcb)) {
2606 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2607 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2608 struct ipr_hostrcb, queue);
/* Detach the chosen hostrcb from whichever queue it was on. */
2611 list_del_init(&hostrcb->queue);
/*
 * NOTE(review): partial listing — sis64/sis32 if/else connectors and braces
 * are elided between the numbered lines.  Code kept byte-identical;
 * comments only.
 */
2616 * ipr_process_error - Op done function for an adapter error log.
2617 * @ipr_cmd: ipr command struct
2619 * This function is the op done function for an error log host
2620 * controlled async from the adapter. It will log the error and
2621 * send the HCAM back to the adapter.
2626 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2628 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2629 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2630 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2634 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2636 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2638 list_del_init(&hostrcb->queue);
/* Return the command to its HRRQ free list before logging. */
2639 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2642 ipr_handle_log_data(ioa_cfg, hostrcb);
2643 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2644 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2645 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2646 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2647 dev_err(&ioa_cfg->pdev->dev,
2648 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
/* Queue the processed hostrcb for userspace reporting and re-arm the HCAM. */
2651 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2652 schedule_work(&ioa_cfg->work_q);
2653 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2655 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
/*
 * NOTE(review): partial listing — ENTER/LEAVE trace macros and braces are
 * elided.  Code kept byte-identical; comments only.
 */
2659 * ipr_timeout -  An internally generated op has timed out.
2660 * @ipr_cmd:	ipr command struct
2662 * This function blocks host requests and initiates an
2668 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2670 unsigned long lock_flags = 0;
2671 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2674 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2676 ioa_cfg->errors_logged++;
2677 dev_err(&ioa_cfg->pdev->dev,
2678 "Adapter being reset due to command timeout.\n");
2680 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2681 ioa_cfg->sdt_state = GET_DUMP;
/* Only reset if no reset is in flight, or this command IS the reset cmd. */
2683 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2684 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2686 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * NOTE(review): partial listing — ENTER/LEAVE trace macros and braces are
 * elided.  Code kept byte-identical; comments only.
 */
2691 * ipr_oper_timeout -  Adapter timed out transitioning to operational
2692 * @ipr_cmd:	ipr command struct
2694 * This function blocks host requests and initiates an
2700 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2702 unsigned long lock_flags = 0;
2703 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2706 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2708 ioa_cfg->errors_logged++;
2709 dev_err(&ioa_cfg->pdev->dev,
2710 "Adapter timed out transitioning to operational.\n");
2712 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2713 ioa_cfg->sdt_state = GET_DUMP;
2715 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
/* Exhaust remaining retries so the reset path gives up promptly. */
2717 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2718 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * NOTE(review): partial listing — declarations, else branches, "return"
 * statements and braces are elided.  Code kept byte-identical;
 * comments only.
 */
2726 * ipr_find_ses_entry - Find matching SES in SES table
2727 * @res:	resource entry struct of SES
2730 * 	pointer to SES table entry / NULL on failure
2732 static const struct ipr_ses_table_entry *
2733 ipr_find_ses_entry(struct ipr_resource_entry *res)
2736 struct ipr_std_inq_vpids *vpids;
2737 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2739 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2740 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
/* 'X' in the table's compare string acts as a per-byte wildcard rule. */
2741 if (ste->compare_product_id_byte[j] == 'X') {
2742 vpids = &res->std_inq_data.vpids;
2743 if (vpids->product_id[j] == ste->product_id[j])
/* A full-length match selects this table entry. */
2751 if (matches == IPR_PROD_ID_LEN)
/*
 * NOTE(review): partial listing — "continue;" lines and braces are elided.
 * Code kept byte-identical; comments only.
 */
2759 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2760 * @ioa_cfg:	ioa config struct
2762 * @bus_width:	bus width
2765 * 	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2766 * 	For a 2-byte wide SCSI bus, the maximum transfer speed is
2767 * 	twice the maximum transfer rate (e.g. for a wide enabled bus,
2768 * 	max 160MHz = max 320MB/sec).
2770 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2772 struct ipr_resource_entry *res;
2773 const struct ipr_ses_table_entry *ste;
2774 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2776 /* Loop through each config table entry in the config table buffer */
2777 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
/* Only SES devices on the requested bus can impose a speed limit. */
2778 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2781 if (bus != res->bus)
2784 if (!(ste = ipr_find_ses_entry(res)))
2787 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2790 return max_xfer_rate;
/*
 * NOTE(review): partial listing — the else/udelay branch, delay accounting
 * and the failure return are elided.  Code kept byte-identical;
 * comments only.
 */
2794 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2795 * @ioa_cfg:	ioa config struct
2796 * @max_delay:	max delay in micro-seconds to wait
2798 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2801 * 	0 on success / other on failure
2803 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2805 volatile u32 pcii_reg;
2808 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2809 while (delay < max_delay) {
2810 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2812 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2815 /* udelay cannot be used if delay is more than a few milliseconds */
2816 if ((delay / 1000) > MAX_UDELAY_MS)
2817 mdelay(delay / 1000);
/*
 * NOTE(review): partial listing — destination pointer increment and the
 * return statement are elided.  Code kept byte-identical; comments only.
 */
2827 * ipr_get_sis64_dump_data_section - Dump IOA memory
2828 * @ioa_cfg:			ioa config struct
2829 * @start_addr:			adapter address to dump
2830 * @dest:			destination kernel buffer
2831 * @length_in_words:		length to dump in 4 byte words
2836 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2838 __be32 *dest, u32 length_in_words)
/* Word-at-a-time: write the adapter address, then read back the data. */
2842 for (i = 0; i < length_in_words; i++) {
2843 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2844 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
/*
 * NOTE(review): partial listing — error returns, pointer/delay increments,
 * udelay calls and braces are elided between the numbered lines.
 * Code kept byte-identical; comments only.
 */
2852 * ipr_get_ldump_data_section - Dump IOA memory
2853 * @ioa_cfg:			ioa config struct
2854 * @start_addr:			adapter address to dump
2855 * @dest:				destination kernel buffer
2856 * @length_in_words:		length to dump in 4 byte words
2859 * 	0 on success / -EIO on failure
2861 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2863 __be32 *dest, u32 length_in_words)
2865 volatile u32 temp_pcii_reg;
/* sis64 adapters use the simpler register-pair dump protocol. */
2869 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2870 dest, length_in_words);
2872 /* Write IOA interrupt reg starting LDUMP state */
2873 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2874 ioa_cfg->regs.set_uproc_interrupt_reg32);
2876 /* Wait for IO debug acknowledge */
2877 if (ipr_wait_iodbg_ack(ioa_cfg,
2878 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2879 dev_err(&ioa_cfg->pdev->dev,
2880 "IOA dump long data transfer timeout\n");
2884 /* Signal LDUMP interlocked - clear IO debug ack */
2885 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2886 ioa_cfg->regs.clr_interrupt_reg);
2888 /* Write Mailbox with starting address */
2889 writel(start_addr, ioa_cfg->ioa_mailbox);
2891 /* Signal address valid - clear IOA Reset alert */
2892 writel(IPR_UPROCI_RESET_ALERT,
2893 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2895 for (i = 0; i < length_in_words; i++) {
2896 /* Wait for IO debug acknowledge */
2897 if (ipr_wait_iodbg_ack(ioa_cfg,
2898 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2899 dev_err(&ioa_cfg->pdev->dev,
2900 "IOA dump short data transfer timeout\n");
2904 /* Read data from mailbox and increment destination pointer */
2905 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2908 /* For all but the last word of data, signal data received */
2909 if (i < (length_in_words - 1)) {
2910 /* Signal dump data received - Clear IO debug Ack */
2911 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2912 ioa_cfg->regs.clr_interrupt_reg);
2916 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2917 writel(IPR_UPROCI_RESET_ALERT,
2918 ioa_cfg->regs.set_uproc_interrupt_reg32);
2920 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2921 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2923 /* Signal dump data received - Clear IO debug Ack */
2924 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2925 ioa_cfg->regs.clr_interrupt_reg);
2927 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2928 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2930 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2932 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2942 #ifdef CONFIG_SCSI_IPR_DUMP
/*
 * NOTE(review): partial listing — failure paths, sis64/else connectors,
 * unlock/abort handling and braces are elided.  Code kept byte-identical;
 * comments only.
 */
2944 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2945 * @ioa_cfg:		ioa config struct
2946 * @pci_address:	adapter address
2947 * @length:		length of data to copy
2949 * Copy data from PCI adapter to kernel buffer.
2950 * Note: length MUST be a 4 byte multiple
2952 * 	0 on success / other on failure
2954 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2955 unsigned long pci_address, u32 length)
2957 int bytes_copied = 0;
2958 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2960 unsigned long lock_flags = 0;
2961 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2964 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2966 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2968 while (bytes_copied < length &&
2969 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
/* Allocate a fresh page when the current one is full (or none exists). */
2970 if (ioa_dump->page_offset >= PAGE_SIZE ||
2971 ioa_dump->page_offset == 0) {
2972 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2976 return bytes_copied;
2979 ioa_dump->page_offset = 0;
2980 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2981 ioa_dump->next_page_index++;
2983 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
/* Copy at most the remainder of the request or of the current page. */
2985 rem_len = length - bytes_copied;
2986 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2987 cur_len = min(rem_len, rem_page_len);
/* The ldump protocol requires the host lock; check for dump abort first. */
2989 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2990 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2993 rc = ipr_get_ldump_data_section(ioa_cfg,
2994 pci_address + bytes_copied,
2995 &page[ioa_dump->page_offset / 4],
2996 (cur_len / sizeof(u32)));
2998 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3001 ioa_dump->page_offset += cur_len;
3002 bytes_copied += cur_len;
3010 return bytes_copied;
/*
 * Fills in the common fields of a dump entry header: eye catcher,
 * data offset, and initial success status.
 */
3014 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3015 * @hdr:	dump entry header struct
3020 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3022 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3024 hdr->offset = sizeof(*hdr);
3025 hdr->status = IPR_DUMP_STATUS_SUCCESS;
/*
 * Records the adapter type and a packed firmware version
 * (major | card_type | minor[0] | minor[1]) in the driver dump,
 * then bumps the dump's entry count.
 */
3029 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3030 * @ioa_cfg:	ioa config struct
3031 * @driver_dump:	driver dump struct
3036 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3037 struct ipr_driver_dump *driver_dump)
3039 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3041 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3042 driver_dump->ioa_type_entry.hdr.len =
3043 sizeof(struct ipr_dump_ioa_type_entry) -
3044 sizeof(struct ipr_dump_entry_header);
3045 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3046 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3047 driver_dump->ioa_type_entry.type = ioa_cfg->type;
3048 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3049 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3050 ucode_vpd->minor_release[1];
3051 driver_dump->hdr.num_entries++;
/*
 * Records the driver version string (IPR_DRIVER_VERSION) as an ASCII
 * dump entry and bumps the dump's entry count.
 */
3055 * ipr_dump_version_data - Fill in the driver version in the dump.
3056 * @ioa_cfg:	ioa config struct
3057 * @driver_dump:	driver dump struct
3062 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3063 struct ipr_driver_dump *driver_dump)
3065 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3066 driver_dump->version_entry.hdr.len =
3067 sizeof(struct ipr_dump_version_entry) -
3068 sizeof(struct ipr_dump_entry_header);
3069 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3070 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3071 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3072 driver_dump->hdr.num_entries++;
/*
 * Copies the driver's in-memory trace buffer (IPR_TRACE_SIZE bytes)
 * into a binary dump entry and bumps the dump's entry count.
 */
3076 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3077 * @ioa_cfg:	ioa config struct
3078 * @driver_dump:	driver dump struct
3083 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3084 struct ipr_driver_dump *driver_dump)
3086 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3087 driver_dump->trace_entry.hdr.len =
3088 sizeof(struct ipr_dump_trace_entry) -
3089 sizeof(struct ipr_dump_entry_header);
3090 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3091 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3092 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3093 driver_dump->hdr.num_entries++;
/*
 * Records the PCI device name (location) as an ASCII dump entry and
 * bumps the dump's entry count.
 */
3097 * ipr_dump_location_data - Fill in the IOA location in the dump.
3098 * @ioa_cfg:	ioa config struct
3099 * @driver_dump:	driver dump struct
3104 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3105 struct ipr_driver_dump *driver_dump)
3107 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3108 driver_dump->location_entry.hdr.len =
3109 sizeof(struct ipr_dump_location_entry) -
3110 sizeof(struct ipr_dump_entry_header);
3111 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3112 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3113 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3114 driver_dump->hdr.num_entries++;
/*
 * NOTE(review): partial listing — declarations, early returns, else
 * branches, "continue;" lines and braces are elided between the numbered
 * lines.  Code kept byte-identical; comments only.
 */
3118 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3119 * @ioa_cfg:	ioa config struct
3120 * @dump:	dump struct
3125 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3127 unsigned long start_addr, sdt_word;
3128 unsigned long lock_flags = 0;
3129 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3130 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3131 u32 num_entries, max_num_entries, start_off, end_off;
3132 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3133 struct ipr_sdt *sdt;
3139 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Nothing to do unless a dump has been requested. */
3141 if (ioa_cfg->sdt_state != READ_DUMP) {
3142 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Give sis64 adapters time to settle before reading the mailbox. */
3146 if (ioa_cfg->sis64) {
3147 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3148 ssleep(IPR_DUMP_DELAY_SECONDS);
3149 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3152 start_addr = readl(ioa_cfg->ioa_mailbox);
3154 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3155 dev_err(&ioa_cfg->pdev->dev,
3156 "Invalid dump table format: %lx\n", start_addr);
3157 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3161 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3163 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3165 /* Initialize the overall dump header */
3166 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3167 driver_dump->hdr.num_entries = 1;
3168 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3169 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3170 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3171 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3173 ipr_dump_version_data(ioa_cfg, driver_dump);
3174 ipr_dump_location_data(ioa_cfg, driver_dump);
3175 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3176 ipr_dump_trace_data(ioa_cfg, driver_dump);
3178 /* Update dump_header */
3179 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3181 /* IOA Dump entry */
3182 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3183 ioa_dump->hdr.len = 0;
3184 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3185 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3187 /* First entries in sdt are actually a list of dump addresses and
3188 lengths to gather the real dump data. sdt represents the pointer
3189 to the ioa generated dump table. Dump data will be extracted based
3190 on entries in this table */
3191 sdt = &ioa_dump->sdt;
3193 if (ioa_cfg->sis64) {
3194 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3195 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3197 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3198 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3201 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3202 (max_num_entries * sizeof(struct ipr_sdt_entry));
3203 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3204 bytes_to_copy / sizeof(__be32));
3206 /* Smart Dump table is ready to use and the first entry is valid */
3207 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3208 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3209 dev_err(&ioa_cfg->pdev->dev,
3210 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3211 rc, be32_to_cpu(sdt->hdr.state));
3212 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3213 ioa_cfg->sdt_state = DUMP_OBTAINED;
3214 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3218 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
/* Clamp adapter-reported entry count to the table bound. */
3220 if (num_entries > max_num_entries)
3221 num_entries = max_num_entries;
3223 /* Update dump length to the actual data to be copied */
3224 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3226 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3228 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3230 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3232 for (i = 0; i < num_entries; i++) {
3233 if (ioa_dump->hdr.len > max_dump_size) {
3234 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3238 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3239 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3241 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3243 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3244 end_off = be32_to_cpu(sdt->entry[i].end_token);
3246 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3247 bytes_to_copy = end_off - start_off;
/* Oversized entries are marked invalid and skipped. */
3252 if (bytes_to_copy > max_dump_size) {
3253 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3257 /* Copy data from adapter to driver buffers */
3258 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3261 ioa_dump->hdr.len += bytes_copied;
3263 if (bytes_copied != bytes_to_copy) {
3264 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3271 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3273 /* Update dump_header */
3274 driver_dump->hdr.len += ioa_dump->hdr.len;
3276 ioa_cfg->sdt_state = DUMP_OBTAINED;
/* Stub when dump support (CONFIG_SCSI_IPR_DUMP) is compiled out. */
3281 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
/*
 * NOTE(review): partial listing — ENTER/LEAVE macros, kfree of the dump
 * struct and braces are elided.  Code kept byte-identical; comments only.
 */
3285 * ipr_release_dump - Free adapter dump memory
3286 * @kref:	kref struct
3291 static void ipr_release_dump(struct kref *kref)
3293 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3294 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3295 unsigned long lock_flags = 0;
/* Detach the dump from the adapter under the host lock first. */
3299 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3300 ioa_cfg->dump = NULL;
3301 ioa_cfg->sdt_state = INACTIVE;
3302 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Then free the per-page buffers and the page-pointer array. */
3304 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3305 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3307 vfree(dump->ioa_dump.ioa_data);
/* Work-queue handler (scsi_add_work_q) that reconciles the adapter's resource
 * list with the SCSI mid-layer: removes devices flagged del_from_ml, then adds
 * devices flagged add_to_ml, dropping the host lock around each mid-layer call. */
3312 static void ipr_add_remove_thread(struct work_struct *work)
3314 unsigned long lock_flags;
3315 struct ipr_resource_entry *res;
3316 struct scsi_device *sdev;
3317 struct ipr_ioa_cfg *ioa_cfg =
3318 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3319 u8 bus, target, lun;
3323 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Bail out if the adapter is not currently accepting commands. */
3328 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3329 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Pass 1: remove devices the adapter reported gone. scsi_remove_device()
 * can sleep, so the host lock is dropped around it and re-taken after. */
3333 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3334 if (res->del_from_ml && res->sdev) {
3337 if (!scsi_device_get(sdev)) {
/* Resource not being re-added: return it to the free queue. */
3338 if (!res->add_to_ml)
3339 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3341 res->del_from_ml = 0;
3342 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3343 scsi_remove_device(sdev);
3344 scsi_device_put(sdev);
3345 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Pass 2: surface newly reported devices to the mid-layer. */
3352 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3353 if (res->add_to_ml) {
3355 target = res->target;
3358 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3359 scsi_add_device(ioa_cfg->host, bus, target, lun);
3360 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Mark the scan complete and notify userspace via a KOBJ_CHANGE uevent. */
3365 ioa_cfg->scan_done = 1;
3366 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3367 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3372  * ipr_worker_thread - Worker thread
3373  * @work: ioa config struct
3375  * Called at task level from a work thread. This function takes care
3376  * of adding and removing device from the mid-layer as configuration
3377  * changes are detected by the adapter.
/* Main deferred-work handler: collects a pending IOA dump, services pending
 * scsi_unblock requests, and kicks the add/remove work when scanning is enabled. */
3382 static void ipr_worker_thread(struct work_struct *work)
3384 unsigned long lock_flags;
3385 struct ipr_dump *dump;
3386 struct ipr_ioa_cfg *ioa_cfg =
3387 container_of(work, struct ipr_ioa_cfg, work_q);
3390 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Dump requested: take a kref on it, read it with the lock dropped, then
 * reset the adapter unless a dump timeout intervened. */
3392 if (ioa_cfg->sdt_state == READ_DUMP) {
3393 dump = ioa_cfg->dump;
3395 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3398 kref_get(&dump->kref);
3399 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3400 ipr_get_ioa_dump(ioa_cfg, dump);
3401 kref_put(&dump->kref, ipr_release_dump);
3403 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3404 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3405 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3406 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Unblock the mid-layer if requested; re-check scsi_blocked afterwards in
 * case a new block request raced in while the lock was dropped. */
3410 if (ioa_cfg->scsi_unblock) {
3411 ioa_cfg->scsi_unblock = 0;
3412 ioa_cfg->scsi_blocked = 0;
3413 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3414 scsi_unblock_requests(ioa_cfg->host);
3415 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3416 if (ioa_cfg->scsi_blocked)
3417 scsi_block_requests(ioa_cfg->host);
3420 if (!ioa_cfg->scan_enabled) {
3421 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Defer actual device add/remove to ipr_add_remove_thread. */
3425 schedule_work(&ioa_cfg->scsi_add_work_q);
3427 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3431 #ifdef CONFIG_SCSI_IPR_TRACE
3433  * ipr_read_trace - Dump the adapter trace
3434  * @filp: open sysfs file
3435  * @kobj: kobject struct
3436  * @bin_attr: bin_attribute struct
3439  * @count: buffer size
3442  * number of bytes printed to buffer
/* sysfs binary read handler: copies from the driver's in-memory trace buffer
 * into the user's buffer under the host lock. */
3444 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3445 struct bin_attribute *bin_attr,
3446 char *buf, loff_t off, size_t count)
3448 struct device *dev = container_of(kobj, struct device, kobj);
3449 struct Scsi_Host *shost = class_to_shost(dev);
3450 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3451 unsigned long lock_flags = 0;
3454 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* memory_read_from_buffer() handles offset/count clamping for us. */
3455 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3457 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs binary attribute exposing the trace buffer (read-only). */
3462 static struct bin_attribute ipr_trace_attr = {
3468 .read = ipr_read_trace,
3473  * ipr_show_fw_version - Show the firmware version
3474  * @dev: class device struct
3478  * number of bytes printed to buffer
/* sysfs show handler: formats the 4-byte microcode version from the cached
 * inquiry page 3 VPD as 8 hex digits. */
3480 static ssize_t ipr_show_fw_version(struct device *dev,
3481 struct device_attribute *attr, char *buf)
3483 struct Scsi_Host *shost = class_to_shost(dev);
3484 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3485 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3486 unsigned long lock_flags = 0;
3489 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3490 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3491 ucode_vpd->major_release, ucode_vpd->card_type,
3492 ucode_vpd->minor_release[0],
3493 ucode_vpd->minor_release[1]);
3494 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only "fw_version" host attribute. */
3498 static struct device_attribute ipr_fw_version_attr = {
3500 .name = "fw_version",
3503 .show = ipr_show_fw_version,
3507  * ipr_show_log_level - Show the adapter's error logging level
3508  * @dev: class device struct
3512  * number of bytes printed to buffer
/* sysfs show handler: prints the current per-adapter error logging level. */
3514 static ssize_t ipr_show_log_level(struct device *dev,
3515 struct device_attribute *attr, char *buf)
3517 struct Scsi_Host *shost = class_to_shost(dev);
3518 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3519 unsigned long lock_flags = 0;
3522 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3523 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3524 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3529  * ipr_store_log_level - Change the adapter's error logging level
3530  * @dev: class device struct
3534  * number of bytes printed to buffer
/* sysfs store handler: parses a decimal level from buf and applies it
 * under the host lock. Note: simple_strtoul() does not reject trailing
 * garbage — invalid suffixes are silently ignored. */
3536 static ssize_t ipr_store_log_level(struct device *dev,
3537 struct device_attribute *attr,
3538 const char *buf, size_t count)
3540 struct Scsi_Host *shost = class_to_shost(dev);
3541 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3542 unsigned long lock_flags = 0;
3544 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3545 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3546 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read/write "log_level" host attribute (world-readable, root-writable). */
3550 static struct device_attribute ipr_log_level_attr = {
3552 .name = "log_level",
3553 .mode = S_IRUGO | S_IWUSR,
3555 .show = ipr_show_log_level,
3556 .store = ipr_store_log_level
3560  * ipr_store_diagnostics - IOA Diagnostics interface
3561  * @dev: device struct
3563  * @count: buffer size
3565  * This function will reset the adapter and wait a reasonable
3566  * amount of time for any errors that the adapter might log.
3569  * count on success / other on failure
/* Root-only sysfs store handler: waits out any in-flight reset, clears the
 * error counter, initiates a normal-shutdown reset, then checks whether the
 * adapter logged errors during recovery. */
3571 static ssize_t ipr_store_diagnostics(struct device *dev,
3572 struct device_attribute *attr,
3573 const char *buf, size_t count)
3575 struct Scsi_Host *shost = class_to_shost(dev);
3576 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3577 unsigned long lock_flags = 0;
3580 if (!capable(CAP_SYS_ADMIN))
/* Wait (lock dropped) until any reset already in progress finishes. */
3583 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3584 while (ioa_cfg->in_reset_reload) {
3585 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3586 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3587 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3590 ioa_cfg->errors_logged = 0;
3591 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3593 if (ioa_cfg->in_reset_reload) {
3594 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3595 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3597 /* Wait for a second for any errors to be logged */
3600 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Diagnostics fail if the adapter is resetting again or logged errors. */
3604 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3605 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3607 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Write-only "run_diagnostics" host attribute. */
3612 static struct device_attribute ipr_diagnostics_attr = {
3614 .name = "run_diagnostics",
3617 .store = ipr_store_diagnostics
3621  * ipr_show_adapter_state - Show the adapter's state
3622  * @class_dev: device struct
3626  * number of bytes printed to buffer
/* sysfs show handler: reports "offline" if the initial HRRQ marks the IOA
 * dead, otherwise "online". */
3628 static ssize_t ipr_show_adapter_state(struct device *dev,
3629 struct device_attribute *attr, char *buf)
3631 struct Scsi_Host *shost = class_to_shost(dev);
3632 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3633 unsigned long lock_flags = 0;
3636 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3637 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3638 len = snprintf(buf, PAGE_SIZE, "offline\n");
3640 len = snprintf(buf, PAGE_SIZE, "online\n");
3641 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3646  * ipr_store_adapter_state - Change adapter state
3647  * @dev: device struct
3649  * @count: buffer size
3651  * This function will change the adapter's state.
3654  * count on success / other on failure
/* Root-only sysfs store handler: writing "online" to a dead adapter clears
 * the per-HRRQ dead flags and kicks off a reset to bring it back. */
3656 static ssize_t ipr_store_adapter_state(struct device *dev,
3657 struct device_attribute *attr,
3658 const char *buf, size_t count)
3660 struct Scsi_Host *shost = class_to_shost(dev);
3661 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3662 unsigned long lock_flags;
3663 int result = count, i;
3665 if (!capable(CAP_SYS_ADMIN))
3668 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3669 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3670 !strncmp(buf, "online", 6)) {
/* Clear the dead flag on every HRRQ under its own per-queue lock. */
3671 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3672 spin_lock(&ioa_cfg->hrrq[i]._lock);
3673 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3674 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3677 ioa_cfg->reset_retries = 0;
3678 ioa_cfg->in_ioa_bringdown = 0;
3679 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3681 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3682 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* Read/write "online_state" host attribute. */
3687 static struct device_attribute ipr_ioa_state_attr = {
3689 .name = "online_state",
3690 .mode = S_IRUGO | S_IWUSR,
3692 .show = ipr_show_adapter_state,
3693 .store = ipr_store_adapter_state
3697  * ipr_store_reset_adapter - Reset the adapter
3698  * @dev: device struct
3700  * @count: buffer size
3702  * This function will reset the adapter.
3705  * count on success / other on failure
/* Root-only sysfs store handler: triggers a normal-shutdown reset unless one
 * is already in progress, then waits for the reset to complete. */
3707 static ssize_t ipr_store_reset_adapter(struct device *dev,
3708 struct device_attribute *attr,
3709 const char *buf, size_t count)
3711 struct Scsi_Host *shost = class_to_shost(dev);
3712 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3713 unsigned long lock_flags;
3716 if (!capable(CAP_SYS_ADMIN))
3719 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3720 if (!ioa_cfg->in_reset_reload)
3721 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3722 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3723 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* Write-only "reset_host" host attribute. */
3728 static struct device_attribute ipr_ioa_reset_attr = {
3730 .name = "reset_host",
3733 .store = ipr_store_reset_adapter
/* Forward declaration: irq_poll callback used when re-initializing iopoll below. */
3736 static int ipr_iopoll(struct irq_poll *iop, int budget);
3738  * ipr_show_iopoll_weight - Show ipr polling mode
3739  * @dev: class device struct
3743  * number of bytes printed to buffer
/* sysfs show handler: prints the current irq_poll weight. */
3745 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3746 struct device_attribute *attr, char *buf)
3748 struct Scsi_Host *shost = class_to_shost(dev);
3749 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3750 unsigned long lock_flags = 0;
3753 spin_lock_irqsave(shost->host_lock, lock_flags);
3754 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3755 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3761  * ipr_store_iopoll_weight - Change the adapter's polling mode
3762  * @dev: class device struct
3766  * number of bytes printed to buffer
/* sysfs store handler: validates the new weight (sis64 only, <= 256, must
 * differ from current), tears down existing irq_poll instances, then
 * re-initializes them with the new weight under the host lock. */
3768 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3769 struct device_attribute *attr,
3770 const char *buf, size_t count)
3772 struct Scsi_Host *shost = class_to_shost(dev);
3773 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3774 unsigned long user_iopoll_weight;
3775 unsigned long lock_flags = 0;
3778 if (!ioa_cfg->sis64) {
3779 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3782 if (kstrtoul(buf, 10, &user_iopoll_weight))
3785 if (user_iopoll_weight > 256) {
3786 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
3790 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3791 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
/* Disable existing pollers on HRRQs 1..n-1 (HRRQ 0 is interrupt-driven). */
3795 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3796 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3797 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3800 spin_lock_irqsave(shost->host_lock, lock_flags);
3801 ioa_cfg->iopoll_weight = user_iopoll_weight;
3802 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3803 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3804 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3805 ioa_cfg->iopoll_weight, ipr_iopoll);
3808 spin_unlock_irqrestore(shost->host_lock, lock_flags);
/* Read/write "iopoll_weight" host attribute. */
3813 static struct device_attribute ipr_iopoll_weight_attr = {
3815 .name = "iopoll_weight",
3816 .mode = S_IRUGO | S_IWUSR,
3818 .show = ipr_show_iopoll_weight,
3819 .store = ipr_store_iopoll_weight
3823  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3824  * @buf_len: buffer length
3826  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3827  * list to use for microcode download
3830  * pointer to sglist / NULL on failure
3832 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3834 int sg_size, order, bsize_elem, num_elem, i, j;
3835 struct ipr_sglist *sglist;
3836 struct scatterlist *scatterlist;
3839 /* Get the minimum size per scatter/gather element */
3840 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3842 /* Get the actual size per element */
3843 order = get_order(sg_size);
3845 /* Determine the actual number of bytes per element */
3846 bsize_elem = PAGE_SIZE * (1 << order);
3848 /* Determine the actual number of sg entries needed */
3849 if (buf_len % bsize_elem)
3850 num_elem = (buf_len / bsize_elem) + 1;
3852 num_elem = buf_len / bsize_elem;
3854 /* Allocate a scatter/gather list for the DMA */
/* Struct-plus-trailing-array allocation; num_elem - 1 extra entries beyond
 * the one embedded in struct ipr_sglist. */
3855 sglist = kzalloc(sizeof(struct ipr_sglist) +
3856 (sizeof(struct scatterlist) * (num_elem - 1)),
3859 if (sglist == NULL) {
3864 scatterlist = sglist->scatterlist;
3865 sg_init_table(scatterlist, num_elem);
3867 sglist->order = order;
3868 sglist->num_sg = num_elem;
3870 /* Allocate a bunch of sg elements */
3871 for (i = 0; i < num_elem; i++) {
3872 page = alloc_pages(GFP_KERNEL, order);
3876 /* Free up what we already allocated */
/* Unwind on partial failure: release pages allocated so far. */
3877 for (j = i - 1; j >= 0; j--)
3878 __free_pages(sg_page(&scatterlist[j]), order);
3883 sg_set_page(&scatterlist[i], page, 0, 0);
3890  * ipr_free_ucode_buffer - Frees a microcode download buffer
3891  * @p_dnld: scatter/gather list pointer
3893  * Free a DMA'able ucode download buffer previously allocated with
3894  * ipr_alloc_ucode_buffer
/* Releases each page block at the order recorded in the sglist. */
3899 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3903 for (i = 0; i < sglist->num_sg; i++)
3904 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3910  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3911  * @sglist: scatter/gather list pointer
3912  * @buffer: buffer pointer
3913  * @len: buffer length
3915  * Copy a microcode image from a user buffer into a buffer allocated by
3916  * ipr_alloc_ucode_buffer
3919  * 0 on success / other on failure
3921 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3922 u8 *buffer, u32 len)
3924 int bsize_elem, i, result = 0;
3925 struct scatterlist *scatterlist;
3928 /* Determine the actual number of bytes per element */
3929 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3931 scatterlist = sglist->scatterlist;
/* Copy whole elements first, one bsize_elem chunk per sg entry. */
3933 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3934 struct page *page = sg_page(&scatterlist[i]);
3937 memcpy(kaddr, buffer, bsize_elem);
3940 scatterlist[i].length = bsize_elem;
/* Then copy the final partial chunk, if any, into the next entry. */
3948 if (len % bsize_elem) {
3949 struct page *page = sg_page(&scatterlist[i]);
3952 memcpy(kaddr, buffer, len % bsize_elem);
3955 scatterlist[i].length = len % bsize_elem;
3958 sglist->buffer_len = len;
3963  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3964  * @ipr_cmd: ipr command struct
3965  * @sglist: scatter/gather list
3967  * Builds a microcode download IOA data list (IOADL).
/* 64-bit (sis64) variant: one ioadl64 descriptor per mapped sg entry,
 * all fields byte-swapped to big-endian for the adapter. */
3970 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3971 struct ipr_sglist *sglist)
3973 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3974 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3975 struct scatterlist *scatterlist = sglist->scatterlist;
3978 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3979 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3980 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3983 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3984 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3985 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3986 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3987 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
/* Mark the final descriptor so the adapter knows where the list ends. */
3990 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3994  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3995  * @ipr_cmd: ipr command struct
3996  * @sglist: scatter/gather list
3998  * Builds a microcode download IOA data list (IOADL).
/* 32-bit variant of ipr_build_ucode_ioadl64: flags and length share one
 * big-endian word per descriptor. */
4001 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
4002 struct ipr_sglist *sglist)
4004 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4005 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
4006 struct scatterlist *scatterlist = sglist->scatterlist;
4009 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
4010 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4011 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
4014 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4016 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4017 ioadl[i].flags_and_data_len =
4018 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
4020 cpu_to_be32(sg_dma_address(&scatterlist[i]));
/* Mark the final descriptor so the adapter knows where the list ends. */
4023 ioadl[i-1].flags_and_data_len |=
4024 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4028  * ipr_update_ioa_ucode - Update IOA's microcode
4029  * @ioa_cfg: ioa config struct
4030  * @sglist: scatter/gather list
4032  * Initiate an adapter reset to update the IOA's microcode
4035  * 0 on success / -EIO on failure
4037 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4038 struct ipr_sglist *sglist)
4040 unsigned long lock_flags;
/* Wait (lock dropped) until any reset already in progress completes. */
4042 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4043 while (ioa_cfg->in_reset_reload) {
4044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4045 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4046 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only one download may be outstanding at a time. */
4049 if (ioa_cfg->ucode_sglist) {
4050 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4051 dev_err(&ioa_cfg->pdev->dev,
4052 "Microcode download already in progress\n");
/* Map the ucode buffer for DMA before handing it to the reset path. */
4056 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4057 sglist->scatterlist, sglist->num_sg,
4060 if (!sglist->num_dma_sg) {
4061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4062 dev_err(&ioa_cfg->pdev->dev,
4063 "Failed to map microcode download buffer!\n");
/* Publish the sglist and reset the adapter; the reset sequence performs
 * the actual download. Wait for the reset to finish before cleanup. */
4067 ioa_cfg->ucode_sglist = sglist;
4068 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4069 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4070 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4072 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4073 ioa_cfg->ucode_sglist = NULL;
4074 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4079  * ipr_store_update_fw - Update the firmware on the adapter
4080  * @class_dev: device struct
4082  * @count: buffer size
4084  * This function will update the firmware on the adapter.
4087  * count on success / other on failure
/* Root-only sysfs store handler: treats buf as a firmware file name, loads
 * the image via request_firmware(), copies it into a DMA-able sg buffer,
 * and drives the download through ipr_update_ioa_ucode(). */
4089 static ssize_t ipr_store_update_fw(struct device *dev,
4090 struct device_attribute *attr,
4091 const char *buf, size_t count)
4093 struct Scsi_Host *shost = class_to_shost(dev);
4094 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4095 struct ipr_ucode_image_header *image_hdr;
4096 const struct firmware *fw_entry;
4097 struct ipr_sglist *sglist;
4101 int result, dnld_size;
4103 if (!capable(CAP_SYS_ADMIN))
/* Copy the filename locally and strip the trailing newline sysfs appends. */
4106 snprintf(fname, sizeof(fname), "%s", buf);
4108 endline = strchr(fname, '\n');
4112 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4113 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
/* Skip the image header; only the payload after header_length is downloaded. */
4117 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4119 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4120 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4121 sglist = ipr_alloc_ucode_buffer(dnld_size);
4124 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4125 release_firmware(fw_entry);
4129 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4132 dev_err(&ioa_cfg->pdev->dev,
4133 "Microcode buffer copy to DMA buffer failed\n");
4137 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4139 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
/* Common cleanup: free the sg buffer and release the firmware image. */
4144 ipr_free_ucode_buffer(sglist);
4145 release_firmware(fw_entry);
/* Write-only "update_fw" host attribute. */
4149 static struct device_attribute ipr_update_fw_attr = {
4151 .name = "update_fw",
4154 .store = ipr_store_update_fw
4158  * ipr_show_fw_type - Show the adapter's firmware type.
4159  * @dev: class device struct
4163  * number of bytes printed to buffer
/* sysfs show handler: prints the sis64 flag (1 = 64-bit SIS firmware). */
4165 static ssize_t ipr_show_fw_type(struct device *dev,
4166 struct device_attribute *attr, char *buf)
4168 struct Scsi_Host *shost = class_to_shost(dev);
4169 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4170 unsigned long lock_flags = 0;
4173 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4174 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4175 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only firmware-type host attribute. */
4179 static struct device_attribute ipr_ioa_fw_type_attr = {
4184 .show = ipr_show_fw_type
/* sysfs binary read handler: returns the HCAM contents of the oldest
 * hostrcb on the report queue without consuming it (pairs with
 * ipr_next_async_err_log, which pops the entry). */
4187 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4188 struct bin_attribute *bin_attr, char *buf,
4189 loff_t off, size_t count)
4191 struct device *cdev = container_of(kobj, struct device, kobj);
4192 struct Scsi_Host *shost = class_to_shost(cdev);
4193 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4194 struct ipr_hostrcb *hostrcb;
4195 unsigned long lock_flags = 0;
4198 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4199 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4200 struct ipr_hostrcb, queue);
/* Empty report queue: nothing to read. */
4202 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4205 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4206 sizeof(hostrcb->hcam));
4207 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs binary write handler: any write acknowledges the oldest reported
 * hostrcb, moving it from the report queue back to the free queue so the
 * next read returns the following entry. */
4211 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4212 struct bin_attribute *bin_attr, char *buf,
4213 loff_t off, size_t count)
4215 struct device *cdev = container_of(kobj, struct device, kobj);
4216 struct Scsi_Host *shost = class_to_shost(cdev);
4217 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4218 struct ipr_hostrcb *hostrcb;
4219 unsigned long lock_flags = 0;
4221 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4222 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4223 struct ipr_hostrcb, queue);
/* Empty report queue: nothing to acknowledge. */
4225 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4229 /* Reclaim hostrcb before exit */
4230 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4231 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Binary attribute: read returns the oldest error log, write consumes it. */
4235 static struct bin_attribute ipr_ioa_async_err_log = {
4237 .name = "async_err_log",
4238 .mode = S_IRUGO | S_IWUSR,
4241 .read = ipr_read_async_err_log,
4242 .write = ipr_next_async_err_log
/* NULL-terminated table of host-level sysfs attributes registered with the
 * SCSI host template. */
4245 static struct device_attribute *ipr_ioa_attrs[] = {
4246 &ipr_fw_version_attr,
4247 &ipr_log_level_attr,
4248 &ipr_diagnostics_attr,
4249 &ipr_ioa_state_attr,
4250 &ipr_ioa_reset_attr,
4251 &ipr_update_fw_attr,
4252 &ipr_ioa_fw_type_attr,
4253 &ipr_iopoll_weight_attr,
4257 #ifdef CONFIG_SCSI_IPR_DUMP
4259  * ipr_read_dump - Dump the adapter
4260  * @filp: open sysfs file
4261  * @kobj: kobject struct
4262  * @bin_attr: bin_attribute struct
4265  * @count: buffer size
4268  * number of bytes printed to buffer
/* Root-only sysfs binary read handler for the captured adapter dump. The dump
 * image is laid out as: driver_dump header, then the SDT header/entries, then
 * the page-array payload; each region is copied piecewise as off advances. */
4270 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4271 struct bin_attribute *bin_attr,
4272 char *buf, loff_t off, size_t count)
4274 struct device *cdev = container_of(kobj, struct device, kobj);
4275 struct Scsi_Host *shost = class_to_shost(cdev);
4276 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4277 struct ipr_dump *dump;
4278 unsigned long lock_flags = 0;
4283 if (!capable(CAP_SYS_ADMIN))
4286 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4287 dump = ioa_cfg->dump;
/* Nothing to read unless a dump has actually been obtained. */
4289 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4290 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Hold a kref so the dump cannot be freed while we copy from it. */
4293 kref_get(&dump->kref);
4294 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4296 if (off > dump->driver_dump.hdr.len) {
4297 kref_put(&dump->kref, ipr_release_dump);
/* Clamp the read to the end of the dump image. */
4301 if (off + count > dump->driver_dump.hdr.len) {
4302 count = dump->driver_dump.hdr.len - off;
/* Region 1: the driver_dump header structure. */
4306 if (count && off < sizeof(dump->driver_dump)) {
4307 if (off + count > sizeof(dump->driver_dump))
4308 len = sizeof(dump->driver_dump) - off;
4311 src = (u8 *)&dump->driver_dump + off;
4312 memcpy(buf, src, len);
4318 off -= sizeof(dump->driver_dump);
/* Region 2: SDT header + entries; size depends on sis64 vs fmt2 layout. */
4321 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4322 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4323 sizeof(struct ipr_sdt_entry));
4325 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4326 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4328 if (count && off < sdt_end) {
4329 if (off + count > sdt_end)
4330 len = sdt_end - off;
4333 src = (u8 *)&dump->ioa_dump + off;
4334 memcpy(buf, src, len);
/* Region 3: page-array payload, copied one page at a time so a request
 * never straddles two non-contiguous pages. */
4343 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4344 len = PAGE_ALIGN(off) - off;
4347 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4348 src += off & ~PAGE_MASK;
4349 memcpy(buf, src, len);
4355 kref_put(&dump->kref, ipr_release_dump);
4360  * ipr_alloc_dump - Prepare for adapter dump
4361  * @ioa_cfg: ioa config struct
4364  * 0 on success / other on failure
/* Allocates the dump control structure plus a vmalloc'd page-pointer table
 * (sized per fmt2/fmt3 limits), then arms the dump state machine. */
4366 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4368 struct ipr_dump *dump;
4370 unsigned long lock_flags = 0;
4372 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4375 ipr_err("Dump memory allocation failed\n");
/* Page-pointer table size depends on the SDT format supported. */
4380 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4382 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4385 ipr_err("Dump memory allocation failed\n");
4390 dump->ioa_dump.ioa_data = ioa_data;
4392 kref_init(&dump->kref);
4393 dump->ioa_cfg = ioa_cfg;
4395 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* A dump may only be armed from the INACTIVE state; otherwise back out. */
4397 if (INACTIVE != ioa_cfg->sdt_state) {
4398 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4399 vfree(dump->ioa_dump.ioa_data);
4404 ioa_cfg->dump = dump;
4405 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
/* If the adapter is already dead, kick the worker to collect the dump now. */
4406 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4407 ioa_cfg->dump_taken = 1;
4408 schedule_work(&ioa_cfg->work_q);
4410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4416  * ipr_free_dump - Free adapter dump memory
4417  * @ioa_cfg: ioa config struct
4420  * 0 on success / other on failure
/* Drops the ioa_cfg's reference to the dump; the memory itself is released
 * by ipr_release_dump() once the last kref is gone. */
4422 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4424 struct ipr_dump *dump;
4425 unsigned long lock_flags = 0;
4429 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4430 dump = ioa_cfg->dump;
/* No dump present: nothing to free. */
4432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4436 ioa_cfg->dump = NULL;
4437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4439 kref_put(&dump->kref, ipr_release_dump);
4446  * ipr_write_dump - Setup dump state of adapter
4447  * @filp: open sysfs file
4448  * @kobj: kobject struct
4449  * @bin_attr: bin_attribute struct
4452  * @count: buffer size
4455  * number of bytes printed to buffer
/* Root-only sysfs binary write handler: writing '1' arms a dump, '0' frees it. */
4457 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4458 struct bin_attribute *bin_attr,
4459 char *buf, loff_t off, size_t count)
4461 struct device *cdev = container_of(kobj, struct device, kobj);
4462 struct Scsi_Host *shost = class_to_shost(cdev);
4463 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4466 if (!capable(CAP_SYS_ADMIN))
4470 rc = ipr_alloc_dump(ioa_cfg);
4471 else if (buf[0] == '0')
4472 rc = ipr_free_dump(ioa_cfg);
/* Binary "dump" attribute: read streams the dump, write arms/frees it. */
4482 static struct bin_attribute ipr_dump_attr = {
4485 .mode = S_IRUSR | S_IWUSR,
4488 .read = ipr_read_dump,
4489 .write = ipr_write_dump
/* Stub for kernels built without CONFIG_SCSI_IPR_DUMP: freeing is a no-op. */
4492 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4496  * ipr_change_queue_depth - Change the device's queue depth
4497  * @sdev: scsi device struct
4498  * @qdepth: depth to set
4499  * @reason: calling context
/* scsi_host_template callback: clamps the requested depth for ATA LUNs,
 * applies it via scsi_change_queue_depth(), and returns the resulting depth. */
4504 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4506 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4507 struct ipr_resource_entry *res;
4508 unsigned long lock_flags = 0;
4510 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4511 res = (struct ipr_resource_entry *)sdev->hostdata;
/* SATA devices are limited to IPR_MAX_CMD_PER_ATA_LUN outstanding commands. */
4513 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4514 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4515 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4517 scsi_change_queue_depth(sdev, qdepth);
4518 return sdev->queue_depth;
4522  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4523  * @dev: device struct
4524  * @attr: device attribute structure
4528  * number of bytes printed to buffer
/* Per-device sysfs show handler: prints the adapter resource handle, or
 * returns -ENXIO if no resource is bound to the sdev. */
4530 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4532 struct scsi_device *sdev = to_scsi_device(dev);
4533 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4534 struct ipr_resource_entry *res;
4535 unsigned long lock_flags = 0;
4536 ssize_t len = -ENXIO;
4538 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4539 res = (struct ipr_resource_entry *)sdev->hostdata;
4541 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4542 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only per-device "adapter_handle" attribute. */
4546 static struct device_attribute ipr_adapter_handle_attr = {
4548 .name = "adapter_handle",
4551 .show = ipr_show_adapter_handle
4555  * ipr_show_resource_path - Show the resource path or the resource address for
4557  * @dev: device struct
4558  * @attr: device attribute structure
4562  * number of bytes printed to buffer
/* Per-device sysfs show handler: sis64 adapters report a formatted resource
 * path; older adapters report the host:bus:target:lun address instead. */
4564 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4566 struct scsi_device *sdev = to_scsi_device(dev);
4567 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4568 struct ipr_resource_entry *res;
4569 unsigned long lock_flags = 0;
4570 ssize_t len = -ENXIO;
4571 char buffer[IPR_MAX_RES_PATH_LENGTH];
4573 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4574 res = (struct ipr_resource_entry *)sdev->hostdata;
4575 if (res && ioa_cfg->sis64)
4576 len = snprintf(buf, PAGE_SIZE, "%s\n",
4577 __ipr_format_res_path(res->res_path, buffer,
4580 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4581 res->bus, res->target, res->lun);
4583 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only per-device "resource_path" attribute. */
4587 static struct device_attribute ipr_resource_path_attr = {
4589 .name = "resource_path",
4592 .show = ipr_show_resource_path
/* sysfs show handler: prints the 64-bit device id (SIS-64) or the LUN WWN
 * (legacy), byte-swapped from wire order in the SIS-64 case.
 * NOTE(review): truncated — the `else if (res)` line for the lun_wwn
 * branch and `return len;` are missing from this extraction. */
4596 * ipr_show_device_id - Show the device_id for this device.
4597 * @dev: device struct
4598 * @attr: device attribute structure
4602 * number of bytes printed to buffer
4604 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4606 struct scsi_device *sdev = to_scsi_device(dev);
4607 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4608 struct ipr_resource_entry *res;
4609 unsigned long lock_flags = 0;
4610 ssize_t len = -ENXIO;
4612 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4613 res = (struct ipr_resource_entry *)sdev->hostdata;
4614 if (res && ioa_cfg->sis64)
/* dev_id is stored big-endian by the adapter; convert for display */
4615 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4617 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4619 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4623 static struct device_attribute ipr_device_id_attr = {
4625 .name = "device_id",
4628 .show = ipr_show_device_id
/* sysfs show handler: prints the raw resource type code for this device.
 * NOTE(review): the guard before the snprintf (likely `if (res)`) is not
 * visible in this truncated extraction — confirm upstream. */
4632 * ipr_show_resource_type - Show the resource type for this device.
4633 * @dev: device struct
4634 * @attr: device attribute structure
4638 * number of bytes printed to buffer
4640 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4642 struct scsi_device *sdev = to_scsi_device(dev);
4643 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4644 struct ipr_resource_entry *res;
4645 unsigned long lock_flags = 0;
4646 ssize_t len = -ENXIO;
4648 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4649 res = (struct ipr_resource_entry *)sdev->hostdata;
4652 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4654 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4658 static struct device_attribute ipr_resource_type_attr = {
4660 .name = "resource_type",
4663 .show = ipr_show_resource_type
/* sysfs show handler: prints whether raw (pass-through) mode is enabled
 * on this resource. NOTE(review): the `ssize_t len` declaration, the
 * `if (res)` guard and `return len;` are missing from this extraction. */
4667 * ipr_show_raw_mode - Show the adapter's raw mode
4668 * @dev: class device struct
4672 * number of bytes printed to buffer
4674 static ssize_t ipr_show_raw_mode(struct device *dev,
4675 struct device_attribute *attr, char *buf)
4677 struct scsi_device *sdev = to_scsi_device(dev);
4678 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4679 struct ipr_resource_entry *res;
4680 unsigned long lock_flags = 0;
4683 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4684 res = (struct ipr_resource_entry *)sdev->hostdata;
4686 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4689 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs store handler: toggles raw mode, but only for AF DASD resources.
 * NOTE(review): truncated — the `ssize_t len` declaration, the
 * `if (res)` guard, the error-path assignments (e.g. -EINVAL/-ENXIO/
 * -EBUSY when the sdev is missing) and `return len;` are not visible. */
4694 * ipr_store_raw_mode - Change the adapter's raw mode
4695 * @dev: class device struct
4699 * number of bytes printed to buffer
4701 static ssize_t ipr_store_raw_mode(struct device *dev,
4702 struct device_attribute *attr,
4703 const char *buf, size_t count)
4705 struct scsi_device *sdev = to_scsi_device(dev);
4706 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4707 struct ipr_resource_entry *res;
4708 unsigned long lock_flags = 0;
4711 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4712 res = (struct ipr_resource_entry *)sdev->hostdata;
/* raw mode only makes sense for advanced-function DASD devices */
4714 if (ipr_is_af_dasd_device(res)) {
/* simple_strtoul: any nonzero input enables raw mode */
4715 res->raw_mode = simple_strtoul(buf, NULL, 10);
4718 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4719 res->raw_mode ? "enabled" : "disabled");
4724 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* read/write attribute: owner read/write, others read-only */
4728 static struct device_attribute ipr_raw_mode_attr = {
4731 .mode = S_IRUGO | S_IWUSR,
4733 .show = ipr_show_raw_mode,
4734 .store = ipr_store_raw_mode
/* Per-device sysfs attributes registered via the scsi_host_template.
 * NOTE(review): the trailing entries (likely &ipr_raw_mode_attr and the
 * NULL terminator) are missing from this extraction — the array MUST be
 * NULL-terminated; verify against upstream. */
4737 static struct device_attribute *ipr_dev_attrs[] = {
4738 &ipr_adapter_handle_attr,
4739 &ipr_resource_path_attr,
4740 &ipr_device_id_attr,
4741 &ipr_resource_type_attr,
/* Fake BIOS geometry: 128 heads x 32 sectors keeps partition starts on
 * 4k boundaries, which the IOA prefers. cylinders = capacity / (128*32).
 * NOTE(review): the `parm[0] = 128; parm[1] = 32;` assignments and the
 * `return 0;` are missing from this truncated extraction. */
4747 * ipr_biosparam - Return the HSC mapping
4748 * @sdev: scsi device struct
4749 * @block_device: block device pointer
4750 * @capacity: capacity of the device
4751 * @parm: Array containing returned HSC values.
4753 * This function generates the HSC parms that fdisk uses.
4754 * We want to make sure we return something that places partitions
4755 * on 4k boundaries for best performance with the IOA.
4760 static int ipr_biosparam(struct scsi_device *sdev,
4761 struct block_device *block_device,
4762 sector_t capacity, int *parm)
4770 cylinders = capacity;
/* sector_div handles 64-bit sector_t division portably */
4771 sector_div(cylinders, (128 * 32));
4776 parm[2] = cylinders;
/* Linear scan of the used-resource queue for a bus/target match.
 * Caller must hold host_lock (the queue is lock-protected elsewhere in
 * this chunk). NOTE(review): the `return res;` inside the match branch
 * and the trailing `return NULL;` are not visible here. */
4782 * ipr_find_starget - Find target based on bus/target.
4783 * @starget: scsi target struct
4786 * resource entry pointer if found / NULL if not found
4788 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4790 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4791 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4792 struct ipr_resource_entry *res;
4794 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4795 if ((res->bus == starget->channel) &&
4796 (res->target == starget->id)) {
/* forward declaration; filled in elsewhere in the file */
4804 static struct ata_port_info sata_port_info;
/* scsi_host_template .target_alloc hook. For SATA (GATA) resources it
 * allocates an ipr_sata_port and a libata SAS port; otherwise a no-op.
 * host_lock is dropped around the two allocations because both may sleep
 * (GFP_KERNEL / ata_sas_port_alloc). NOTE(review): truncated — the NULL
 * checks after kzalloc/ata_sas_port_alloc, sata_port->ap assignment,
 * error cleanup (kfree) and `return` statements are not visible. */
4807 * ipr_target_alloc - Prepare for commands to a SCSI target
4808 * @starget: scsi target struct
4810 * If the device is a SATA device, this function allocates an
4811 * ATA port with libata, else it does nothing.
4814 * 0 on success / non-0 on failure
4816 static int ipr_target_alloc(struct scsi_target *starget)
4818 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4819 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4820 struct ipr_sata_port *sata_port;
4821 struct ata_port *ap;
4822 struct ipr_resource_entry *res;
4823 unsigned long lock_flags;
4825 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4826 res = ipr_find_starget(starget);
4827 starget->hostdata = NULL;
4829 if (res && ipr_is_gata(res)) {
/* drop the lock: the allocations below can sleep */
4830 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4831 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4835 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4837 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4838 sata_port->ioa_cfg = ioa_cfg;
4840 sata_port->res = res;
/* cross-link resource, ata port and target for later lookups */
4842 res->sata_port = sata_port;
4843 ap->private_data = sata_port;
4844 starget->hostdata = sata_port;
4850 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* scsi_host_template .target_destroy hook. On SIS-64, releases the
 * virtual-bus target id bitmap slot when the resource is gone; for SATA
 * targets, destroys the libata port allocated in ipr_target_alloc.
 * NOTE(review): the `if (sata_port)` guard and kfree(sata_port) around
 * the ata_sas_port_destroy call are not visible in this extraction. */
4856 * ipr_target_destroy - Destroy a SCSI target
4857 * @starget: scsi target struct
4859 * If the device was a SATA device, this function frees the libata
4860 * ATA port, else it does nothing.
4863 static void ipr_target_destroy(struct scsi_target *starget)
4865 struct ipr_sata_port *sata_port = starget->hostdata;
4866 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4867 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4869 if (ioa_cfg->sis64) {
/* only recycle the id if no resource still claims this target */
4870 if (!ipr_find_starget(starget)) {
4871 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4872 clear_bit(starget->id, ioa_cfg->array_ids);
4873 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4874 clear_bit(starget->id, ioa_cfg->vset_ids);
4875 else if (starget->channel == 0)
4876 clear_bit(starget->id, ioa_cfg->target_ids);
4881 starget->hostdata = NULL;
4882 ata_sas_port_destroy(sata_port->ap);
/* Linear scan of used_res_q for an exact bus/target/lun match.
 * NOTE(review): the `return res;` / `return NULL;` lines are missing
 * from this truncated extraction. */
4888 * ipr_find_sdev - Find device based on bus/target/lun.
4889 * @sdev: scsi device struct
4892 * resource entry pointer if found / NULL if not found
4894 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4896 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4897 struct ipr_resource_entry *res;
4899 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4900 if ((res->bus == sdev->channel) &&
4901 (res->target == sdev->id) &&
4902 (res->lun == sdev->lun))
/* scsi_host_template .slave_destroy hook: detaches the resource entry
 * from the sdev and, for SATA resources, marks the ATA device gone.
 * NOTE(review): the `if (res)` / `if (res->sata_port)` guards around
 * these statements are missing from this extraction. */
4910 * ipr_slave_destroy - Unconfigure a SCSI device
4911 * @sdev: scsi device struct
4916 static void ipr_slave_destroy(struct scsi_device *sdev)
4918 struct ipr_resource_entry *res;
4919 struct ipr_ioa_cfg *ioa_cfg;
4920 unsigned long lock_flags = 0;
4922 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4924 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4925 res = (struct ipr_resource_entry *) sdev->hostdata;
/* tell libata the device behind this link is gone */
4928 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4929 sdev->hostdata = NULL;
4931 res->sata_port = NULL;
4933 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* scsi_host_template .slave_configure hook: applies per-resource-type
 * settings (RAID/VSET/GATA), then for SATA hands the sdev to libata.
 * host_lock is released before the libata calls and before the
 * sdev_printk. NOTE(review): truncated — the `if (res)` wrapper,
 * `return 0;` paths and some brace structure are not visible. */
4937 * ipr_slave_configure - Configure a SCSI device
4938 * @sdev: scsi device struct
4940 * This function configures the specified scsi device.
4945 static int ipr_slave_configure(struct scsi_device *sdev)
4947 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4948 struct ipr_resource_entry *res;
4949 struct ata_port *ap = NULL;
4950 unsigned long lock_flags = 0;
4951 char buffer[IPR_MAX_RES_PATH_LENGTH];
4953 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4954 res = sdev->hostdata;
4956 if (ipr_is_af_dasd_device(res))
4957 sdev->type = TYPE_RAID;
4958 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4959 sdev->scsi_level = 4;
/* keep upper-layer drivers (sd etc.) off adapter-internal resources */
4960 sdev->no_uld_attach = 1;
4962 if (ipr_is_vset_device(res)) {
4963 sdev->scsi_level = SCSI_SPC_3;
4964 sdev->no_report_opcodes = 1;
/* volume sets get a longer timeout and their own max transfer size */
4965 blk_queue_rq_timeout(sdev->request_queue,
4966 IPR_VSET_RW_TIMEOUT);
4967 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4969 if (ipr_is_gata(res) && res->sata_port)
4970 ap = res->sata_port->ap;
4971 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4974 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4975 ata_sas_slave_configure(sdev, ap);
4979 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4980 ipr_format_res_path(ioa_cfg,
4981 res->res_path, buffer, sizeof(buffer)));
4984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Initializes and synchronously probes the libata port attached to the
 * target so subsequent queuecommand calls work. On probe failure the
 * sdev is torn down again. NOTE(review): truncated — the `if (sata_port)`
 * guard, the `if (rc == 0)` chaining between init and probe, and the
 * `return rc;` are not visible here. */
4989 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4990 * @sdev: scsi device struct
4992 * This function initializes an ATA port so that future commands
4993 * sent through queuecommand will work.
4998 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
5000 struct ipr_sata_port *sata_port = NULL;
5004 if (sdev->sdev_target)
5005 sata_port = sdev->sdev_target->hostdata;
5007 rc = ata_sas_port_init(sata_port->ap);
5009 rc = ata_sas_sync_probe(sata_port->ap);
/* probe failed: undo the slave setup so the midlayer sees a clean state */
5013 ipr_slave_destroy(sdev);
/* scsi_host_template .slave_alloc hook: caches the resource entry in
 * sdev->hostdata for fast access in queuecommand. SATA resources are
 * forwarded to ipr_ata_slave_alloc (which needs the lock dropped).
 * NOTE(review): truncated — the `int rc = -ENXIO;` declaration, the
 * `if (res)` wrapper, res->sdev assignment and final `return rc;` are
 * not visible in this extraction. */
5020 * ipr_slave_alloc - Prepare for commands to a device.
5021 * @sdev: scsi device struct
5023 * This function saves a pointer to the resource entry
5024 * in the scsi device struct if the device exists. We
5025 * can then use this pointer in ipr_queuecommand when
5026 * handling new commands.
5029 * 0 on success / -ENXIO if device does not exist
5031 static int ipr_slave_alloc(struct scsi_device *sdev)
5033 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5034 struct ipr_resource_entry *res;
5035 unsigned long lock_flags;
5038 sdev->hostdata = NULL;
5040 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5042 res = ipr_find_sdev(sdev);
5047 sdev->hostdata = res;
/* non-NACA models need a SYNC COMPLETE after error recovery */
5048 if (!ipr_is_naca_model(res))
5049 res->needs_sync_complete = 1;
5051 if (ipr_is_gata(res)) {
5052 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5053 return ipr_ata_slave_alloc(sdev);
5057 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Match predicate for ipr_wait_for_ops: true when the command targets
 * the given scsi_device. NOTE(review): the `return 1;` / `return 0;`
 * lines are missing from this extraction. */
5063 * ipr_match_lun - Match function for specified LUN
5064 * @ipr_cmd: ipr command struct
5065 * @device: device to match (sdev)
5068 * 1 if command matches sdev / 0 if command does not match sdev
5070 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5072 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
/* A command is "free" iff it currently sits on its hrrq free queue;
 * walk the free list looking for it. NOTE(review): the return
 * statements are missing from this extraction. */
5078 * ipr_cmnd_is_free - Check if a command is free or not
5079 * @ipr_cmd ipr command struct
5084 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5086 struct ipr_cmnd *loop_cmd;
5088 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5089 if (loop_cmd == ipr_cmd)
/* Match predicate for ipr_wait_for_ops: true when the command's IOARCB
 * resource handle matches the given resource entry's handle.
 * NOTE(review): return statements missing from this extraction. */
5097 * ipr_match_res - Match function for specified resource entry
5098 * @ipr_cmd: ipr command struct
5099 * @resource: resource entry to match
5102 * 1 if command matches sdev / 0 if command does not match sdev
5104 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5106 struct ipr_resource_entry *res = resource;
5108 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
/* EH helper: attach a completion to every outstanding command that
 * matches `match`, then wait (with timeout) for them to finish; loops
 * until a pass finds no matching in-flight command. Returns SUCCESS or
 * FAILED (on timeout). NOTE(review): truncated — the outer `do { }
 * while (wait)` / `int wait;` scaffolding, the `wait = 1;` marking and
 * `if (timeout)` handling are not visible in this extraction. */
5114 * ipr_wait_for_ops - Wait for matching commands to complete
5115 * @ipr_cmd: ipr command struct
5116 * @device: device to match (sdev)
5117 * @match: match function to use
5122 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5123 int (*match)(struct ipr_cmnd *, void *))
5125 struct ipr_cmnd *ipr_cmd;
5127 unsigned long flags;
5128 struct ipr_hrr_queue *hrrq;
5129 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5130 DECLARE_COMPLETION_ONSTACK(comp);
/* pass 1: hook our completion onto every matching active command */
5136 for_each_hrrq(hrrq, ioa_cfg) {
5137 spin_lock_irqsave(hrrq->lock, flags);
5138 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5139 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5140 if (!ipr_cmnd_is_free(ipr_cmd)) {
5141 if (match(ipr_cmd, device)) {
5142 ipr_cmd->eh_comp = &comp;
5147 spin_unlock_irqrestore(hrrq->lock, flags);
5151 timeout = wait_for_completion_timeout(&comp, timeout);
/* pass 2 (after timeout): unhook the completion so the stack
 * completion cannot be signalled once we return */
5156 for_each_hrrq(hrrq, ioa_cfg) {
5157 spin_lock_irqsave(hrrq->lock, flags);
5158 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5159 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5160 if (!ipr_cmnd_is_free(ipr_cmd)) {
5161 if (match(ipr_cmd, device)) {
5162 ipr_cmd->eh_comp = NULL;
5167 spin_unlock_irqrestore(hrrq->lock, flags);
5171 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5173 return wait ? FAILED : SUCCESS;
/* SCSI EH host-reset handler: kicks off an abbreviated adapter reset
 * (unless one is already in flight or the IOA is dead), waits for
 * reset/reload to finish, then reports FAILED if the IOA ended up dead.
 * NOTE(review): truncated — `int rc = SUCCESS;`, the `rc = FAILED;`
 * assignment in the dead-IOA branch and `return rc;` are not visible. */
5182 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5184 struct ipr_ioa_cfg *ioa_cfg;
5185 unsigned long lock_flags = 0;
5189 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5190 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5192 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5193 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5194 dev_err(&ioa_cfg->pdev->dev,
5195 "Adapter being reset as a result of error recovery.\n");
/* a reset supersedes the pending-dump wait; collect the dump now */
5197 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5198 ioa_cfg->sdt_state = GET_DUMP;
/* drop the lock while sleeping on reset completion */
5201 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5202 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5203 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5205 /* If we got hit with a host reset while we were already resetting
5206 the adapter for some reason, and the reset failed. */
5207 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5212 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Issues a blocking IPR_RESET_DEVICE IOA command to the resource; for
 * SATA devices the CDB selects an ATA PHY reset and the ATA status is
 * copied back into the sata_port on completion. Returns 0 or -EIO based
 * on the IOASC sense key. NOTE(review): some brace/else structure and
 * a `u32 ioasc;` declaration are missing from this extraction. */
5218 * ipr_device_reset - Reset the device
5219 * @ioa_cfg: ioa config struct
5220 * @res: resource entry struct
5222 * This function issues a device reset to the affected device.
5223 * If the device is a SCSI device, a LUN reset will be sent
5224 * to the device first. If that does not work, a target reset
5225 * will be sent. If the device is a SATA device, a PHY reset will
5229 * 0 on success / non-zero on failure
5231 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5232 struct ipr_resource_entry *res)
5234 struct ipr_cmnd *ipr_cmd;
5235 struct ipr_ioarcb *ioarcb;
5236 struct ipr_cmd_pkt *cmd_pkt;
5237 struct ipr_ioarcb_ata_regs *regs;
5241 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5242 ioarcb = &ipr_cmd->ioarcb;
5243 cmd_pkt = &ioarcb->cmd_pkt;
/* SIS-64 carries the ATA regs in the ioadl area, legacy inline */
5245 if (ipr_cmd->ioa_cfg->sis64) {
5246 regs = &ipr_cmd->i.ata_ioadl.regs;
5247 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5249 regs = &ioarcb->u.add_data.u.regs;
5251 ioarcb->res_handle = res->res_handle;
5252 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5253 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5254 if (ipr_is_gata(res)) {
5255 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5256 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5257 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5260 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5261 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5262 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/* propagate the ATA taskfile result unless the whole IOA was reset */
5263 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5264 if (ipr_cmd->ioa_cfg->sis64)
5265 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5266 sizeof(struct ipr_ioasa_gata));
5268 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5269 sizeof(struct ipr_ioasa_gata));
5273 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
/* libata hardreset hook: waits out any in-progress adapter reset, then
 * issues an ipr_device_reset on the attached resource and reports the
 * resulting ATA device class. If waiting for outstanding ops fails, the
 * adapter itself is reset as an escalation. NOTE(review): truncated —
 * the `if (res)` wrapper around the reset/wait sequence and `return rc;`
 * are not visible in this extraction. */
5277 * ipr_sata_reset - Reset the SATA port
5278 * @link: SATA link to reset
5279 * @classes: class of the attached device
5281 * This function issues a SATA phy reset to the affected ATA link.
5284 * 0 on success / non-zero on failure
5286 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5287 unsigned long deadline)
5289 struct ipr_sata_port *sata_port = link->ap->private_data;
5290 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5291 struct ipr_resource_entry *res;
5292 unsigned long lock_flags = 0;
5293 int rc = -ENXIO, ret;
5296 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* block behind any adapter reset/reload already in flight */
5297 while (ioa_cfg->in_reset_reload) {
5298 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5299 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5300 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5303 res = sata_port->res;
5305 rc = ipr_device_reset(ioa_cfg, res);
5306 *classes = res->ata_class;
5307 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5309 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5310 if (ret != SUCCESS) {
/* escalate: outstanding ops never drained — reset the whole IOA */
5311 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5312 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5313 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5315 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5318 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Core of the SCSI EH device-reset handler (caller holds host_lock).
 * Fails fast during reset/reload or if the IOA is dead, flags any
 * in-flight SATA qc's for this resource as failed, then either runs the
 * libata error handler (GATA) or issues an ipr_device_reset.
 * NOTE(review): truncated — `int rc = 0;`, the `if (!res) return FAILED;`
 * guard, the else between the GATA and non-GATA reset paths, and some
 * brace structure are not visible in this extraction. */
5325 * ipr_eh_dev_reset - Reset the device
5326 * @scsi_cmd: scsi command struct
5328 * This function issues a device reset to the affected device.
5329 * A LUN reset will be sent to the device first. If that does
5330 * not work, a target reset will be sent.
5335 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5337 struct ipr_cmnd *ipr_cmd;
5338 struct ipr_ioa_cfg *ioa_cfg;
5339 struct ipr_resource_entry *res;
5340 struct ata_port *ap;
5342 struct ipr_hrr_queue *hrrq;
5345 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5346 res = scsi_cmd->device->hostdata;
5349 * If we are currently going through reset/reload, return failed. This will force the
5350 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5353 if (ioa_cfg->in_reset_reload)
5355 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
/* mark every active SATA command on this resource as timed out so
 * libata EH will reclaim them */
5358 for_each_hrrq(hrrq, ioa_cfg) {
5359 spin_lock(&hrrq->_lock);
5360 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5361 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5363 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5366 if (ipr_cmnd_is_free(ipr_cmd))
5369 ipr_cmd->done = ipr_sata_eh_done;
5370 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5371 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5372 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5376 spin_unlock(&hrrq->_lock);
5378 res->resetting_device = 1;
5379 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5381 if (ipr_is_gata(res) && res->sata_port) {
5382 ap = res->sata_port->ap;
/* libata EH sleeps; must release host_lock around it */
5383 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5384 ata_std_error_handler(ap);
5385 spin_lock_irq(scsi_cmd->device->host->host_lock);
5387 rc = ipr_device_reset(ioa_cfg, res);
5388 res->resetting_device = 0;
5389 res->reset_occurred = 1;
5392 return rc ? FAILED : SUCCESS;
/* SCSI EH device-reset entry point: takes host_lock around the core
 * reset, then waits for all matching outstanding ops to drain before
 * reporting success. NOTE(review): `int rc;`, the `if (!res) return
 * FAILED;` guard, the `else` before the LUN-match wait, and `return rc;`
 * are missing from this extraction. */
5395 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5398 struct ipr_ioa_cfg *ioa_cfg;
5399 struct ipr_resource_entry *res;
5401 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5402 res = cmd->device->hostdata;
5407 spin_lock_irq(cmd->device->host->host_lock);
5408 rc = __ipr_eh_dev_reset(cmd);
5409 spin_unlock_irq(cmd->device->host->host_lock);
5411 if (rc == SUCCESS) {
/* SATA: match by resource handle; SCSI: match by LUN */
5412 if (ipr_is_gata(res) && res->sata_port)
5413 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5415 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
/* Completion callback for the bus reset issued by ipr_abort_timeout.
 * Reports the bus reset to the midlayer (non-SIS64), then either clears
 * the sibling link (abort still pending) or completes the sleeping
 * abort. NOTE(review): the `else` between the two sibling actions and
 * the loop `break` are missing from this extraction. */
5422 * ipr_bus_reset_done - Op done function for bus reset.
5423 * @ipr_cmd: ipr command struct
5425 * This function is the op done function for a bus reset
5430 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5432 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5433 struct ipr_resource_entry *res;
5436 if (!ioa_cfg->sis64)
5437 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5438 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5439 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5445 * If abort has not completed, indicate the reset has, else call the
5446 * abort's done function to wake the sleeping eh thread
5448 if (ipr_cmd->sibling->sibling)
5449 ipr_cmd->sibling->sibling = NULL;
5451 ipr_cmd->sibling->done(ipr_cmd->sibling);
5453 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/* Timer handler: an abort (cancel-all) request itself timed out, so a
 * bus reset is escalated to free the tied-up resources. Bails out if
 * the abort completed or an adapter reset is already running. */
5458 * ipr_abort_timeout - An abort task has timed out
5459 * @ipr_cmd: ipr command struct
5461 * This function handles when an abort task times out. If this
5462 * happens we issue a bus reset since we have resources tied
5463 * up that must be freed before returning to the midlayer.
5468 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5470 struct ipr_cmnd *reset_cmd;
5471 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5472 struct ipr_cmd_pkt *cmd_pkt;
5473 unsigned long lock_flags = 0;
5476 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* abort already done, or adapter reset in progress: nothing to do */
5477 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5478 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5482 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5483 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
/* link abort and reset so ipr_bus_reset_done can wake the EH thread */
5484 ipr_cmd->sibling = reset_cmd;
5485 reset_cmd->sibling = ipr_cmd;
5486 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5487 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5488 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5489 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5490 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5492 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5493 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Attempts to cancel one in-flight SCSI command via a blocking
 * IPR_CANCEL_ALL_REQUESTS to its resource. Only GSCSI resources are
 * eligible; a register read is done first to kick EEH detection.
 * Returns SUCCESS/FAILED from the IOASC sense key.
 * NOTE(review): truncated — `u32 ioasc; volatile u32 int_reg;`
 * declarations, the `op_found = 1; break;` marking, the `if (!op_found)`
 * early SUCCESS return, and several `return FAILED;` paths are not
 * visible in this extraction. */
5498 * ipr_cancel_op - Cancel specified op
5499 * @scsi_cmd: scsi command struct
5501 * This function cancels specified op.
5506 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5508 struct ipr_cmnd *ipr_cmd;
5509 struct ipr_ioa_cfg *ioa_cfg;
5510 struct ipr_resource_entry *res;
5511 struct ipr_cmd_pkt *cmd_pkt;
5513 int i, op_found = 0;
5514 struct ipr_hrr_queue *hrrq;
5517 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5518 res = scsi_cmd->device->hostdata;
5520 /* If we are currently going through reset/reload, return failed.
5521 * This will force the mid-layer to call ipr_eh_host_reset,
5522 * which will then go to sleep and wait for the reset to complete
5524 if (ioa_cfg->in_reset_reload ||
5525 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5531 * If we are aborting a timed out op, chances are that the timeout was caused
5532 * by a still not detected EEH error. In such cases, reading a register will
5533 * trigger the EEH recovery infrastructure.
5535 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5537 if (!ipr_is_gscsi(res))
/* verify the command is still outstanding before sending the abort */
5540 for_each_hrrq(hrrq, ioa_cfg) {
5541 spin_lock(&hrrq->_lock);
5542 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5543 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5544 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5550 spin_unlock(&hrrq->_lock);
5556 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5557 ipr_cmd->ioarcb.res_handle = res->res_handle;
5558 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5559 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5560 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
/* remember the sdev for the abort-timeout printk */
5561 ipr_cmd->u.sdev = scsi_cmd->device;
5563 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5565 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5566 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5569 * If the abort task timed out and we sent a bus reset, we will get
5570 * one the following responses to the abort
5572 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5577 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5578 if (!ipr_is_naca_model(res))
5579 res->needs_sync_complete = 1;
5582 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
/* scsi_host_template .scan_finished hook: scan is complete once the
 * IOA is dead, scan_done is set, or 2x the transop timeout elapsed.
 * NOTE(review): the kdoc header above actually belongs to ipr_eh_abort —
 * the extraction dropped the real ipr_scan_finished header. The
 * `int rc = 0;`, `rc = 1;` assignments and `return rc;` are also
 * missing from this extraction. */
5586 * ipr_eh_abort - Abort a single op
5587 * @scsi_cmd: scsi command struct
5590 * 0 if scan in progress / 1 if scan is complete
5592 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5594 unsigned long lock_flags;
5595 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5598 spin_lock_irqsave(shost->host_lock, lock_flags);
5599 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
/* give up waiting after twice the transition-to-operational timeout */
5601 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5603 spin_unlock_irqrestore(shost->host_lock, lock_flags);
/* SCSI EH abort handler: cancels the op under host_lock, then waits for
 * outstanding ops on the LUN to drain if the cancel succeeded.
 * NOTE(review): the kdoc header above is mislabeled ipr_eh_host_reset —
 * extraction artifact. `int rc;`, the `if (rc == SUCCESS)` guard before
 * the wait, and `return rc;` are also missing here. */
5608 * ipr_eh_host_reset - Reset the host adapter
5609 * @scsi_cmd: scsi command struct
5614 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5616 unsigned long flags;
5618 struct ipr_ioa_cfg *ioa_cfg;
5622 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5624 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5625 rc = ipr_cancel_op(scsi_cmd);
5626 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5629 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
/* Handles non-HRRQ ("other") interrupt causes: IPL stage changes
 * (SIS-64), transition-to-operational, spurious HRRQ-updated bits, and
 * fatal conditions (unit check, missing host RRQ, permanent failure)
 * which trigger a full adapter reset. NOTE(review): several `return rc;`
 * / `return IRQ_NONE;` statements and brace structure are missing from
 * this truncated extraction. */
5635 * ipr_handle_other_interrupt - Handle "other" interrupts
5636 * @ioa_cfg: ioa config struct
5637 * @int_reg: interrupt register
5640 * IRQ_NONE / IRQ_HANDLED
5642 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5645 irqreturn_t rc = IRQ_HANDLED;
5648 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
/* only consider unmasked interrupt causes */
5649 int_reg &= ~int_mask_reg;
5651 /* If an interrupt on the adapter did not occur, ignore it.
5652 * Or in the case of SIS 64, check for a stage change interrupt.
5654 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5655 if (ioa_cfg->sis64) {
5656 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5657 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5658 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5660 /* clear stage change */
5661 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5662 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
/* advance the running reset job to its next stage */
5663 list_del(&ioa_cfg->reset_cmd->queue);
5664 del_timer(&ioa_cfg->reset_cmd->timer);
5665 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5673 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5674 /* Mask the interrupt */
5675 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5676 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5678 list_del(&ioa_cfg->reset_cmd->queue);
5679 del_timer(&ioa_cfg->reset_cmd->timer);
5680 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5681 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5682 if (ioa_cfg->clear_isr) {
/* rate-limited: HRRQ-updated with no work means a spurious IRQ */
5683 if (ipr_debug && printk_ratelimit())
5684 dev_err(&ioa_cfg->pdev->dev,
5685 "Spurious interrupt detected. 0x%08X\n", int_reg);
5686 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5687 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
/* fatal causes below: log, grab a dump if one was pending, reset */
5691 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5692 ioa_cfg->ioa_unit_checked = 1;
5693 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5694 dev_err(&ioa_cfg->pdev->dev,
5695 "No Host RRQ. 0x%08X\n", int_reg);
5697 dev_err(&ioa_cfg->pdev->dev,
5698 "Permanent IOA failure. 0x%08X\n", int_reg);
5700 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5701 ioa_cfg->sdt_state = GET_DUMP;
5703 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5704 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/* ISR-level error escalation: log the error, opportunistically switch a
 * pending dump wait to GET_DUMP, and reset the adapter. */
5711 * ipr_isr_eh - Interrupt service routine error handler
5712 * @ioa_cfg: ioa config struct
5713 * @msg: message to log
5718 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5720 ioa_cfg->errors_logged++;
5721 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5723 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5724 ioa_cfg->sdt_state = GET_DUMP;
5726 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/* Drains completed responses from one host request/response queue while
 * the toggle bit matches, moving each completed ipr_cmnd onto `doneq`.
 * Stops after `budget` entries (budget < 0 means unlimited). Caller
 * holds the hrrq lock. NOTE(review): the `int num_hrrq = 0;` /
 * `u32 ioasc;` declarations, the `ipr_isr_eh(...)` call body on the
 * invalid-handle path and the `return num_hrrq;` are missing from this
 * truncated extraction. */
5729 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5730 struct list_head *doneq)
5734 struct ipr_cmnd *ipr_cmd;
5735 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5738 /* If interrupts are disabled, ignore the interrupt */
5739 if (!hrr_queue->allow_interrupts)
/* toggle bit flips each time the queue wraps; match means new entry */
5742 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5743 hrr_queue->toggle_bit) {
5745 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5746 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5747 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
/* a handle outside this queue's id range means a sick adapter */
5749 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5750 cmd_index < hrr_queue->min_cmd_id)) {
5752 "Invalid response handle from IOA: ",
5757 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5758 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5760 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5762 list_move_tail(&ipr_cmd->queue, doneq);
5764 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5765 hrr_queue->hrrq_curr++;
/* wrap to the start and flip the expected toggle bit */
5767 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5768 hrr_queue->toggle_bit ^= 1u;
5771 if (budget > 0 && num_hrrq >= budget)
/* irq_poll (NAPI-style) handler: drains up to `budget` completions from
 * this hrrq under its lock, completes the poll if under budget, then
 * runs the done callbacks outside the lock. */
5778 static int ipr_iopoll(struct irq_poll *iop, int budget)
5780 struct ipr_ioa_cfg *ioa_cfg;
5781 struct ipr_hrr_queue *hrrq;
5782 struct ipr_cmnd *ipr_cmd, *temp;
5783 unsigned long hrrq_flags;
5787 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5788 ioa_cfg = hrrq->ioa_cfg;
5790 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5791 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
/* under budget means the queue is drained; re-enable interrupts */
5793 if (completed_ops < budget)
5794 irq_poll_complete(iop);
5795 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* run completion handlers with the hrrq lock released */
5797 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5798 list_del(&ipr_cmd->queue);
5799 del_timer(&ipr_cmd->timer);
5800 ipr_cmd->fast_done(ipr_cmd);
5803 return completed_ops;
/* Primary interrupt handler: repeatedly drains the hrrq, clears the PCI
 * HRRQ-updated interrupt (with a bounded retry loop when clear_isr is
 * set), escalates via ipr_isr_eh if the bit will not clear, and defers
 * non-HRRQ causes to ipr_handle_other_interrupt. Completions are run
 * after dropping the hrrq lock. NOTE(review): truncated — the outer
 * `while (1)` loop, `irq_none`/`num_hrrq` declarations, several `break`
 * statements and the final `return rc;` are not visible here. */
5807 * ipr_isr - Interrupt service routine
5809 * @devp: pointer to ioa config struct
5812 * IRQ_NONE / IRQ_HANDLED
5814 static irqreturn_t ipr_isr(int irq, void *devp)
5816 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5817 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5818 unsigned long hrrq_flags = 0;
5822 struct ipr_cmnd *ipr_cmd, *temp;
5823 irqreturn_t rc = IRQ_NONE;
5826 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5827 /* If interrupts are disabled, ignore the interrupt */
5828 if (!hrrq->allow_interrupts) {
5829 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5834 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5837 if (!ioa_cfg->clear_isr)
5840 /* Clear the PCI interrupt */
5843 writel(IPR_PCII_HRRQ_UPDATED,
5844 ioa_cfg->regs.clr_interrupt_reg32);
5845 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
/* bounded retry: some adapters need multiple clears */
5846 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5847 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5849 } else if (rc == IRQ_NONE && irq_none == 0) {
5850 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5852 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5853 int_reg & IPR_PCII_HRRQ_UPDATED) {
/* HRRQ-updated bit stuck after max retries: reset the adapter */
5855 "Error clearing HRRQ: ", num_hrrq);
5862 if (unlikely(rc == IRQ_NONE))
5863 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5865 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5866 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5867 list_del(&ipr_cmd->queue);
5868 del_timer(&ipr_cmd->timer);
5869 ipr_cmd->fast_done(ipr_cmd);
/* Interrupt handler for the secondary (MSI-X) hrrqs: if iopoll is
 * enabled on SIS-64 with multiple vectors, work is deferred to
 * irq_poll; otherwise the queue is drained inline and completions run
 * after the lock is dropped. NOTE(review): truncated — the
 * `LIST_HEAD(doneq)`, `rc = IRQ_HANDLED;` assignments, several
 * `return` statements and the toggle-bit comparison operands are not
 * fully visible in this extraction. */
5875 * ipr_isr_mhrrq - Interrupt service routine
5877 * @devp: pointer to ioa config struct
5880 * IRQ_NONE / IRQ_HANDLED
5882 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5884 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5885 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5886 unsigned long hrrq_flags = 0;
5887 struct ipr_cmnd *ipr_cmd, *temp;
5888 irqreturn_t rc = IRQ_NONE;
5891 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5893 /* If interrupts are disabled, ignore the interrupt */
5894 if (!hrrq->allow_interrupts) {
5895 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5899 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
/* hand the drain work to the irq_poll softirq */
5900 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5902 irq_poll_sched(&hrrq->iopoll);
5903 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5907 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5910 if (ipr_process_hrrq(hrrq, -1, &doneq))
5914 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5916 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5917 list_del(&ipr_cmd->queue);
5918 del_timer(&ipr_cmd->timer);
5919 ipr_cmd->fast_done(ipr_cmd);
5925 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5926 * @ioa_cfg: ioa config struct
5927 * @ipr_cmd: ipr command struct
5930 * 0 on success / -1 on failure
/*
 * 64-bit (SIS-64) variant: DMA-maps the scsi_cmd and fills the 64-bit
 * IOADL descriptor array from the resulting sg list, marking the final
 * descriptor with IPR_IOADL_FLAGS_LAST.  NOTE(review): this excerpt
 * elides some lines (e.g. the zero-length early return), per the
 * non-contiguous embedded line numbers.
 */
5932 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5933 struct ipr_cmnd *ipr_cmd)
5936 struct scatterlist *sg;
5938 u32 ioadl_flags = 0;
5939 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5940 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5941 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5943 length = scsi_bufflen(scsi_cmd);
5947 nseg = scsi_dma_map(scsi_cmd);
5949 if (printk_ratelimit())
5950 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5954 ipr_cmd->dma_use_sg = nseg;
5956 ioarcb->data_transfer_length = cpu_to_be32(length);
5958 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
/* Direction decides the IOADL flags; writes also set the IOARCB hint. */
5960 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5961 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5962 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5963 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5964 ioadl_flags = IPR_IOADL_FLAGS_READ;
5966 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5967 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5968 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5969 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
/* Tag the final descriptor so the adapter knows the list ends here. */
5972 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5977 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5978 * @ioa_cfg: ioa config struct
5979 * @ipr_cmd: ipr command struct
5982 * 0 on success / -1 on failure
/*
 * 32-bit variant of the IOADL builder.  Unlike the 64-bit path, reads
 * and writes use separate length/IOADL-length fields in the IOARCB, and
 * small sg lists are embedded directly in the IOARCB's add_data area to
 * save an adapter fetch.  NOTE(review): lines elided in this excerpt
 * (non-contiguous embedded numbers).
 */
5984 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5985 struct ipr_cmnd *ipr_cmd)
5988 struct scatterlist *sg;
5990 u32 ioadl_flags = 0;
5991 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5992 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5993 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5995 length = scsi_bufflen(scsi_cmd);
5999 nseg = scsi_dma_map(scsi_cmd);
6001 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
6005 ipr_cmd->dma_use_sg = nseg;
6007 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
6008 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6009 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6010 ioarcb->data_transfer_length = cpu_to_be32(length);
6012 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6013 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6014 ioadl_flags = IPR_IOADL_FLAGS_READ;
6015 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6016 ioarcb->read_ioadl_len =
6017 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* Small lists fit inline in the IOARCB; point the IOADL address there. */
6020 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6021 ioadl = ioarcb->u.add_data.u.ioadl;
6022 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6023 offsetof(struct ipr_ioarcb, u.add_data));
6024 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6027 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6028 ioadl[i].flags_and_data_len =
6029 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6030 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
/* Tag the final descriptor to terminate the list. */
6033 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6038 * __ipr_erp_done - Process completion of ERP for a device
6039 * @ipr_cmd: ipr command struct
6041 * This function copies the sense buffer into the scsi_cmd
6042 * struct and pushes the scsi_done function.
/*
 * Lock-held ERP completion (caller must hold hrrq->_lock — see the
 * ipr_erp_done() wrapper).  A failed Request Sense turns into DID_ERROR;
 * otherwise the gathered sense data is copied to the midlayer buffer.
 */
6047 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6049 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6050 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6051 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6053 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6054 scsi_cmd->result |= (DID_ERROR << 16);
6055 scmd_printk(KERN_ERR, scsi_cmd,
6056 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6058 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6059 SCSI_SENSE_BUFFERSIZE);
/* NACA-model devices manage their own sync; others need a sync complete. */
6063 if (!ipr_is_naca_model(res))
6064 res->needs_sync_complete = 1;
6067 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6068 scsi_cmd->scsi_done(scsi_cmd);
6069 if (ipr_cmd->eh_comp)
6070 complete(ipr_cmd->eh_comp);
6071 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6075 * ipr_erp_done - Process completion of ERP for a device
6076 * @ipr_cmd: ipr command struct
6078 * This function copies the sense buffer into the scsi_cmd
6079 * struct and pushes the scsi_done function.
/* Locking wrapper: takes hrrq->_lock around __ipr_erp_done(). */
6084 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6086 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6087 unsigned long hrrq_flags;
6089 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6090 __ipr_erp_done(ipr_cmd);
6091 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6095 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6096 * @ipr_cmd: ipr command struct
/*
 * Resets the IOARCB command packet, transfer lengths and IOASA status
 * so the same ipr_cmnd can be reused for a follow-up ERP op (cancel
 * all / request sense), and repoints the IOADL address at the command's
 * own DMA region — 64-bit field on SIS-64, 32-bit pair otherwise.
 */
6101 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6103 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6104 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6105 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6107 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6108 ioarcb->data_transfer_length = 0;
6109 ioarcb->read_data_transfer_length = 0;
6110 ioarcb->ioadl_len = 0;
6111 ioarcb->read_ioadl_len = 0;
6112 ioasa->hdr.ioasc = 0;
6113 ioasa->hdr.residual_data_len = 0;
6115 if (ipr_cmd->ioa_cfg->sis64)
6116 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6117 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6119 ioarcb->write_ioadl_addr =
6120 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl))
6121 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6126 * __ipr_erp_request_sense - Send request sense to a device
6127 * @ipr_cmd: ipr command struct
6129 * This function sends a request sense to a device as a result
6130 * of a check condition.
/*
 * Lock-held (hrrq->_lock) step of ERP: if sense data already arrived
 * with the failure, finish immediately; otherwise reuse the command
 * block to issue a REQUEST SENSE CDB into the per-command sense buffer.
 */
6135 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6137 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6138 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6140 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6141 __ipr_erp_done(ipr_cmd);
6145 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6147 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6148 cmd_pkt->cdb[0] = REQUEST_SENSE;
6149 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6150 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6151 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6152 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6154 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6155 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
/* Driver-side timeout is double the adapter timeout as a backstop. */
6157 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6158 IPR_REQUEST_SENSE_TIMEOUT * 2);
6162 * ipr_erp_request_sense - Send request sense to a device
6163 * @ipr_cmd: ipr command struct
6165 * This function sends a request sense to a device as a result
6166 * of a check condition.
/* Locking wrapper: takes hrrq->_lock around __ipr_erp_request_sense(). */
6171 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6173 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6174 unsigned long hrrq_flags;
6176 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6177 __ipr_erp_request_sense(ipr_cmd);
6178 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6182 * ipr_erp_cancel_all - Send cancel all to a device
6183 * @ipr_cmd: ipr command struct
6185 * This function sends a cancel all to a device to clear the
6186 * queue. If we are running TCQ on the device, QERR is set to 1,
6187 * which means all outstanding ops have been dropped on the floor.
6188 * Cancel all will return them to us.
/*
 * Untagged devices have nothing queued to cancel, so they go straight
 * to the request-sense step; tagged devices first get a CANCEL ALL
 * IOA command, with request sense chained as its completion handler.
 */
6193 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6195 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6196 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6197 struct ipr_cmd_pkt *cmd_pkt;
6201 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6203 if (!scsi_cmd->device->simple_tags) {
6204 __ipr_erp_request_sense(ipr_cmd);
6208 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6209 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6210 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6212 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6213 IPR_CANCEL_ALL_TIMEOUT);
6217 * ipr_dump_ioasa - Dump contents of IOASA
6218 * @ioa_cfg: ioa config struct
6219 * @ipr_cmd: ipr command struct
6220 * @res: resource entry struct
6222 * This function is invoked by the interrupt handler when ops
6223 * fail. It will log the IOASA if appropriate. Only called
/*
 * Logs the error-table entry for the failing IOASC and, at higher log
 * levels, hex-dumps the raw IOASA (clamped to the sis64/sis32 struct
 * size).  NOTE(review): several early-return filter lines are elided in
 * this excerpt (non-contiguous embedded line numbers).
 */
6229 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6230 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6234 u32 ioasc, fd_ioasc;
6235 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6236 __be32 *ioasa_data = (__be32 *)ioasa;
6239 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6240 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6245 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
/* Bus-reset failures log the failing-device IOASC, not the generic one. */
6248 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6249 error_index = ipr_get_error(fd_ioasc);
6251 error_index = ipr_get_error(ioasc);
6253 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6254 /* Don't log an error if the IOA already logged one */
6255 if (ioasa->hdr.ilid != 0)
6258 if (!ipr_is_gscsi(res))
6261 if (ipr_error_table[error_index].log_ioasa == 0)
6265 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
/* Clamp the dump length to the size of the relevant IOASA struct. */
6267 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6268 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6269 data_len = sizeof(struct ipr_ioasa64);
6270 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6271 data_len = sizeof(struct ipr_ioasa);
6273 ipr_err("IOASA Dump:\n");
6275 for (i = 0; i < data_len / 4; i += 4) {
6276 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6277 be32_to_cpu(ioasa_data[i]),
6278 be32_to_cpu(ioasa_data[i+1]),
6279 be32_to_cpu(ioasa_data[i+2]),
6280 be32_to_cpu(ioasa_data[i+3]));
6285 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6287 * @sense_buf: sense data buffer
/*
 * Synthesizes SCSI sense data in the midlayer's sense buffer from the
 * adapter's IOASC.  Volume sets whose failing LBA exceeds 32 bits get
 * descriptor-format sense (0x72) with an 8-byte information descriptor;
 * everything else gets fixed-format sense (0x70), including the field
 * pointer for illegal requests and the failing LBA for media errors.
 * NOTE(review): some lines are elided in this excerpt.
 */
6292 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6295 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6296 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6297 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6298 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6300 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* Driver-generated IOASCs carry no device sense to translate. */
6302 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6305 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/* >32-bit failing LBA on a vset: use descriptor-format sense (0x72). */
6307 if (ipr_is_vset_device(res) &&
6308 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6309 ioasa->u.vset.failing_lba_hi != 0) {
6310 sense_buf[0] = 0x72;
6311 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6312 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6313 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6317 sense_buf[9] = 0x0A;
6318 sense_buf[10] = 0x80;
6320 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6322 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6323 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6324 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6325 sense_buf[15] = failing_lba & 0x000000ff;
6327 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6329 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6330 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6331 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6332 sense_buf[19] = failing_lba & 0x000000ff;
/* Otherwise: fixed-format sense (0x70). */
6334 sense_buf[0] = 0x70;
6335 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6336 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6337 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6339 /* Illegal request */
6340 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6341 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6342 sense_buf[7] = 10; /* additional length */
6344 /* IOARCB was in error */
6345 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6346 sense_buf[15] = 0xC0;
6347 else /* Parameter data was invalid */
6348 sense_buf[15] = 0x80;
/* Field pointer: big-endian 16-bit offset into the failing CDB/data. */
6351 ((IPR_FIELD_POINTER_MASK &
6352 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6354 (IPR_FIELD_POINTER_MASK &
6355 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6357 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6358 if (ipr_is_vset_device(res))
6359 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6361 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6363 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6364 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6365 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6366 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6367 sense_buf[6] = failing_lba & 0x000000ff;
6370 sense_buf[7] = 6; /* additional length */
6376 * ipr_get_autosense - Copy autosense data to sense buffer
6377 * @ipr_cmd: ipr command struct
6379 * This function copies the autosense buffer to the buffer
6380 * in the scsi_cmd, if there is autosense available.
6383 * 1 if autosense was available / 0 if not
/*
 * Copies adapter-gathered autosense (from the sis64 or sis32 IOASA
 * layout) into the midlayer sense buffer, clamped to
 * SCSI_SENSE_BUFFERSIZE.  Saves a separate REQUEST SENSE round trip.
 */
6385 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6387 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6388 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6390 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6393 if (ipr_cmd->ioa_cfg->sis64)
6394 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6395 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6396 SCSI_SENSE_BUFFERSIZE));
6398 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6399 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6400 SCSI_SENSE_BUFFERSIZE));
6405 * ipr_erp_start - Process an error response for a SCSI op
6406 * @ioa_cfg: ioa config struct
6407 * @ipr_cmd: ipr command struct
6409 * This function determines whether or not to initiate ERP
6410 * on the affected device.
/*
 * Translates the masked IOASC into a midlayer result (retry, error,
 * no-connect, passthrough) and, for CHECK CONDITION without autosense
 * on non-NACA devices, kicks off the cancel-all/request-sense ERP
 * sequence instead of completing immediately.  NOTE(review): break
 * statements and some case bodies are elided in this excerpt
 * (non-contiguous embedded line numbers).
 */
6415 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6416 struct ipr_cmnd *ipr_cmd)
6418 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6419 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6420 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6421 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6424 __ipr_scsi_eh_done(ipr_cmd);
/* Non-GSCSI devices have their IOASC rendered as synthetic sense data. */
6428 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6429 ipr_gen_sense(ipr_cmd);
6431 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6433 switch (masked_ioasc) {
6434 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6435 if (ipr_is_naca_model(res))
6436 scsi_cmd->result |= (DID_ABORT << 16);
6438 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6440 case IPR_IOASC_IR_RESOURCE_HANDLE:
6441 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6442 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6444 case IPR_IOASC_HW_SEL_TIMEOUT:
6445 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6446 if (!ipr_is_naca_model(res))
6447 res->needs_sync_complete = 1;
6449 case IPR_IOASC_SYNC_REQUIRED:
6451 res->needs_sync_complete = 1;
6452 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6454 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6455 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6457 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6458 * so SCSI mid-layer and upper layers handle it accordingly.
6460 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6461 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6463 case IPR_IOASC_BUS_WAS_RESET:
6464 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6466 * Report the bus reset and ask for a retry. The device
6467 * will give CC/UA the next command.
6469 if (!res->resetting_device)
6470 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6471 scsi_cmd->result |= (DID_ERROR << 16);
6472 if (!ipr_is_naca_model(res))
6473 res->needs_sync_complete = 1;
6475 case IPR_IOASC_HW_DEV_BUS_STATUS:
6476 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6477 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
/* No autosense available: start ERP to fetch sense from the device. */
6478 if (!ipr_get_autosense(ipr_cmd)) {
6479 if (!ipr_is_naca_model(res)) {
6480 ipr_erp_cancel_all(ipr_cmd);
6485 if (!ipr_is_naca_model(res))
6486 res->needs_sync_complete = 1;
6488 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6490 case IPR_IOASC_IR_NON_OPTIMIZED:
6491 if (res->raw_mode) {
6493 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6495 scsi_cmd->result |= (DID_ERROR << 16);
/* Default case: anything above RECOVERED_ERROR is a hard error. */
6498 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6499 scsi_cmd->result |= (DID_ERROR << 16);
6500 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6501 res->needs_sync_complete = 1;
6505 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6506 scsi_cmd->scsi_done(scsi_cmd);
6507 if (ipr_cmd->eh_comp)
6508 complete(ipr_cmd->eh_comp);
6509 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6513 * ipr_scsi_done - mid-layer done function
6514 * @ipr_cmd: ipr command struct
6516 * This function is invoked by the interrupt handler for
6517 * ops generated by the SCSI mid-layer
/*
 * Fast path for success (sense key 0): unmap, complete to the midlayer
 * and recycle the command under only the HRRQ lock.  Failures take the
 * host lock plus hrrq->_lock and go through ipr_erp_start().
 */
6522 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6524 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6525 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6526 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6527 unsigned long lock_flags;
6529 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6531 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6532 scsi_dma_unmap(scsi_cmd);
6534 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6535 scsi_cmd->scsi_done(scsi_cmd);
6536 if (ipr_cmd->eh_comp)
6537 complete(ipr_cmd->eh_comp);
6538 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6539 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
/* Error path: ERP runs with host lock held, nested over hrrq->_lock. */
6541 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6542 spin_lock(&ipr_cmd->hrrq->_lock);
6543 ipr_erp_start(ioa_cfg, ipr_cmd);
6544 spin_unlock(&ipr_cmd->hrrq->_lock);
6545 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6550 * ipr_queuecommand - Queue a mid-layer request
6551 * @shost: scsi host struct
6552 * @scsi_cmd: scsi command struct
6554 * This function queues a request generated by the mid-layer.
6558 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6559 * SCSI_MLQUEUE_HOST_BUSY if host is busy
/*
 * .queuecommand entry point.  GATA devices are diverted to libata;
 * SCSI ops pick an HRRQ, grab a free ipr_cmnd, build the CDB/IOADL and
 * send the IOARCB, re-checking allow_cmds/ioa_is_dead after building
 * since the lock is dropped in between.  Dead-IOA requests complete
 * with DID_NO_CONNECT at the tail.  NOTE(review): gotos/returns between
 * the visible statements are elided in this excerpt.
 */
6561 static int ipr_queuecommand(struct Scsi_Host *shost,
6562 struct scsi_cmnd *scsi_cmd)
6564 struct ipr_ioa_cfg *ioa_cfg;
6565 struct ipr_resource_entry *res;
6566 struct ipr_ioarcb *ioarcb;
6567 struct ipr_cmnd *ipr_cmd;
6568 unsigned long hrrq_flags, lock_flags;
6570 struct ipr_hrr_queue *hrrq;
6573 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6575 scsi_cmd->result = (DID_OK << 16);
6576 res = scsi_cmd->device->hostdata;
/* SATA devices go through libata's queuecommand under the host lock. */
6578 if (ipr_is_gata(res) && res->sata_port) {
6579 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6580 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6581 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6585 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6586 hrrq = &ioa_cfg->hrrq[hrrq_id];
6588 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6590 * We are currently blocking all devices due to a host reset
6591 * We have told the host to stop giving us new requests, but
6592 * ERP ops don't count. FIXME
6594 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6595 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6596 return SCSI_MLQUEUE_HOST_BUSY;
6600 * FIXME - Create scsi_set_host_offline interface
6601 * and the ioa_is_dead check can be removed
6603 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6604 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6608 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6609 if (ipr_cmd == NULL) {
6610 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6611 return SCSI_MLQUEUE_HOST_BUSY;
6613 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6615 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6616 ioarcb = &ipr_cmd->ioarcb;
6618 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6619 ipr_cmd->scsi_cmd = scsi_cmd;
6620 ipr_cmd->done = ipr_scsi_eh_done;
6622 if (ipr_is_gscsi(res)) {
6623 if (scsi_cmd->underflow == 0)
6624 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
/* First op after a reset: ask the adapter to delay after the reset. */
6626 if (res->reset_occurred) {
6627 res->reset_occurred = 0;
6628 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6632 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6633 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6635 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6636 if (scsi_cmd->flags & SCMD_TAGGED)
6637 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6639 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
/* Vendor-range CDB opcodes (>= 0xC0) are adapter commands, not SCSI. */
6642 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6643 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6644 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6646 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6647 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6649 if (scsi_cmd->underflow == 0)
6650 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6654 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6656 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
/* Re-check adapter state: the lock was dropped while building. */
6658 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6659 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6660 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6661 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6663 scsi_dma_unmap(scsi_cmd);
6664 return SCSI_MLQUEUE_HOST_BUSY;
6667 if (unlikely(hrrq->ioa_is_dead)) {
6668 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6669 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6670 scsi_dma_unmap(scsi_cmd);
6674 ioarcb->res_handle = res->res_handle;
6675 if (res->needs_sync_complete) {
6676 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6677 res->needs_sync_complete = 0;
6679 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6680 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6681 ipr_send_command(ipr_cmd);
6682 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Dead-IOA / no-resource path: fail the command with DID_NO_CONNECT. */
6686 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6687 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6688 scsi_cmd->result = (DID_NO_CONNECT << 16);
6689 scsi_cmd->scsi_done(scsi_cmd);
6690 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6695 * ipr_ioctl - IOCTL handler
6696 * @sdev: scsi device struct
6701 * 0 on success / other on failure
/*
 * Forwards ioctls on GATA resources to libata's SAS ioctl handler;
 * HDIO_GET_IDENTITY is special-cased (elided line 6710 holds that
 * return).  Non-GATA handling is also elided from this excerpt.
 */
6703 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6705 struct ipr_resource_entry *res;
6707 res = (struct ipr_resource_entry *)sdev->hostdata;
6708 if (res && ipr_is_gata(res)) {
6709 if (cmd == HDIO_GET_IDENTITY)
6711 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6718 * ipr_info - Get information about the card/driver
6719 * @scsi_host: scsi host struct
6722 * pointer to buffer with description string
/*
 * .info callback: formats the adapter type into a static buffer under
 * the host lock.  The static buffer means concurrent callers on
 * different hosts share storage — serialized only by the host lock.
 */
6724 static const char *ipr_ioa_info(struct Scsi_Host *host)
6726 static char buffer[512];
6727 struct ipr_ioa_cfg *ioa_cfg;
6728 unsigned long lock_flags = 0;
6730 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6732 spin_lock_irqsave(host->host_lock, lock_flags);
6733 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6734 spin_unlock_irqrestore(host->host_lock, lock_flags);
/* SCSI midlayer host template: wires the ipr_* entry points defined in
 * this file into the scsi_host_template contract. */
6739 static struct scsi_host_template driver_template = {
6740 .module = THIS_MODULE,
6742 .info = ipr_ioa_info,
6744 .queuecommand = ipr_queuecommand,
6745 .eh_abort_handler = ipr_eh_abort,
6746 .eh_device_reset_handler = ipr_eh_dev_reset,
6747 .eh_host_reset_handler = ipr_eh_host_reset,
6748 .slave_alloc = ipr_slave_alloc,
6749 .slave_configure = ipr_slave_configure,
6750 .slave_destroy = ipr_slave_destroy,
6751 .scan_finished = ipr_scan_finished,
6752 .target_alloc = ipr_target_alloc,
6753 .target_destroy = ipr_target_destroy,
6754 .change_queue_depth = ipr_change_queue_depth,
6755 .bios_param = ipr_biosparam,
6756 .can_queue = IPR_MAX_COMMANDS,
6758 .sg_tablesize = IPR_MAX_SGLIST,
6759 .max_sectors = IPR_IOA_MAX_SECTORS,
6760 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6761 .use_clustering = ENABLE_CLUSTERING,
6762 .shost_attrs = ipr_ioa_attrs,
6763 .sdev_attrs = ipr_dev_attrs,
6764 .proc_name = IPR_NAME,
6768 * ipr_ata_phy_reset - libata phy_reset handler
6769 * @ap: ata port to reset
/*
 * Waits out any in-flight adapter reset (sleeping, so dropping and
 * retaking the host lock), then issues a device reset and classifies
 * the ATA device from the resource entry.  NOTE(review): some gotos/
 * returns are elided in this excerpt.
 */
6772 static void ipr_ata_phy_reset(struct ata_port *ap)
6774 unsigned long flags;
6775 struct ipr_sata_port *sata_port = ap->private_data;
6776 struct ipr_resource_entry *res = sata_port->res;
6777 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6781 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* wait_event sleeps, so the host lock is dropped around each wait. */
6782 while (ioa_cfg->in_reset_reload) {
6783 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6784 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6785 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6788 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6791 rc = ipr_device_reset(ioa_cfg, res);
6794 ap->link.device[0].class = ATA_DEV_NONE;
6798 ap->link.device[0].class = res->ata_class;
6799 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6800 ap->link.device[0].class = ATA_DEV_NONE;
6803 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6808 * ipr_ata_post_internal - Cleanup after an internal command
6809 * @qc: ATA queued command
/*
 * If the given qc is still pending on any HRRQ after a libata internal
 * command, reset the device to flush it.  Waits for any in-progress
 * adapter reset first (same sleep-and-relock pattern as phy_reset).
 */
6814 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6816 struct ipr_sata_port *sata_port = qc->ap->private_data;
6817 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6818 struct ipr_cmnd *ipr_cmd;
6819 struct ipr_hrr_queue *hrrq;
6820 unsigned long flags;
6822 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6823 while (ioa_cfg->in_reset_reload) {
6824 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6825 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6826 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* Search every HRRQ's pending queue for the qc being cleaned up. */
6829 for_each_hrrq(hrrq, ioa_cfg) {
6830 spin_lock(&hrrq->_lock);
6831 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6832 if (ipr_cmd->qc == qc) {
6833 ipr_device_reset(ioa_cfg, sata_port->res);
6837 spin_unlock(&hrrq->_lock);
6839 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6843 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6844 * @regs: destination
6845 * @tf: source ATA taskfile
/* Field-by-field copy of the ATA taskfile (including HOB registers for
 * 48-bit LBA commands) into the adapter's register image. */
6850 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6851 struct ata_taskfile *tf)
6853 regs->feature = tf->feature;
6854 regs->nsect = tf->nsect;
6855 regs->lbal = tf->lbal;
6856 regs->lbam = tf->lbam;
6857 regs->lbah = tf->lbah;
6858 regs->device = tf->device;
6859 regs->command = tf->command;
6860 regs->hob_feature = tf->hob_feature;
6861 regs->hob_nsect = tf->hob_nsect;
6862 regs->hob_lbal = tf->hob_lbal;
6863 regs->hob_lbam = tf->hob_lbam;
6864 regs->hob_lbah = tf->hob_lbah;
6865 regs->ctl = tf->ctl;
6869 * ipr_sata_done - done function for SATA commands
6870 * @ipr_cmd: ipr command struct
6872 * This function is invoked by the interrupt handler for
6873 * ops generated by the SCSI mid-layer to SATA devices
/*
 * Copies the GATA status out of the (sis64 or sis32) IOASA, reports a
 * device reset if the adapter flagged one, folds the IOASC into the
 * qc's err_mask and completes the qc — after releasing hrrq->_lock,
 * since ata_qc_complete() may re-enter the driver.
 */
6878 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6880 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6881 struct ata_queued_cmd *qc = ipr_cmd->qc;
6882 struct ipr_sata_port *sata_port = qc->ap->private_data;
6883 struct ipr_resource_entry *res = sata_port->res;
6884 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6886 spin_lock(&ipr_cmd->hrrq->_lock);
6887 if (ipr_cmd->ioa_cfg->sis64)
6888 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6889 sizeof(struct ipr_ioasa_gata));
6891 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6892 sizeof(struct ipr_ioasa_gata));
6893 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6895 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6896 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6898 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6899 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6901 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6902 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6903 spin_unlock(&ipr_cmd->hrrq->_lock);
6904 ata_qc_complete(qc);
6908 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6909 * @ipr_cmd: ipr command struct
6910 * @qc: ATA queued command
/*
 * SIS-64 ATA variant of the IOADL builder: walks qc->sg, filling 64-bit
 * descriptors located in the command's ata_ioadl region, tracking the
 * last descriptor so only it is tagged IPR_IOADL_FLAGS_LAST.
 */
6913 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6914 struct ata_queued_cmd *qc)
6916 u32 ioadl_flags = 0;
6917 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6918 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6919 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6920 int len = qc->nbytes;
6921 struct scatterlist *sg;
6923 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6928 if (qc->dma_dir == DMA_TO_DEVICE) {
6929 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6930 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6931 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6932 ioadl_flags = IPR_IOADL_FLAGS_READ;
6934 ioarcb->data_transfer_length = cpu_to_be32(len);
6936 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6937 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6938 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6940 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6941 ioadl64->flags = cpu_to_be32(ioadl_flags);
6942 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6943 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6945 last_ioadl64 = ioadl64;
/* last_ioadl64 is NULL for an empty sg list; guard before tagging. */
6949 if (likely(last_ioadl64))
6950 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6954 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6955 * @ipr_cmd: ipr command struct
6956 * @qc: ATA queued command
/*
 * 32-bit ATA variant: like ipr_build_ioadl(), reads and writes fill
 * separate IOARCB length fields; only the last descriptor written is
 * tagged IPR_IOADL_FLAGS_LAST.
 */
6959 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6960 struct ata_queued_cmd *qc)
6962 u32 ioadl_flags = 0;
6963 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6964 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6965 struct ipr_ioadl_desc *last_ioadl = NULL;
6966 int len = qc->nbytes;
6967 struct scatterlist *sg;
6973 if (qc->dma_dir == DMA_TO_DEVICE) {
6974 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6975 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6976 ioarcb->data_transfer_length = cpu_to_be32(len);
6978 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6979 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6980 ioadl_flags = IPR_IOADL_FLAGS_READ;
6981 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6982 ioarcb->read_ioadl_len =
6983 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6986 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6987 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6988 ioadl->address = cpu_to_be32(sg_dma_address(sg));
/* last_ioadl is NULL for an empty sg list; guard before tagging. */
6994 if (likely(last_ioadl))
6995 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6999 * ipr_qc_defer - Get a free ipr_cmd
7000 * @qc: queued command
/*
 * libata .qc_defer hook: reserves a free ipr_cmnd for the qc ahead of
 * issue, stashing it in qc->lldd_task.  Returns ATA_DEFER_LINK when the
 * adapter is not accepting commands or no command block is free.
 */
7005 static int ipr_qc_defer(struct ata_queued_cmd *qc)
7007 struct ata_port *ap = qc->ap;
7008 struct ipr_sata_port *sata_port = ap->private_data;
7009 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7010 struct ipr_cmnd *ipr_cmd;
7011 struct ipr_hrr_queue *hrrq;
7014 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7015 hrrq = &ioa_cfg->hrrq[hrrq_id];
7017 qc->lldd_task = NULL;
7018 spin_lock(&hrrq->_lock);
7019 if (unlikely(hrrq->ioa_is_dead)) {
7020 spin_unlock(&hrrq->_lock);
7024 if (unlikely(!hrrq->allow_cmds)) {
7025 spin_unlock(&hrrq->_lock);
7026 return ATA_DEFER_LINK;
7029 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7030 if (ipr_cmd == NULL) {
7031 spin_unlock(&hrrq->_lock);
7032 return ATA_DEFER_LINK;
/* Hand the reserved command block to ipr_qc_issue() via lldd_task. */
7035 qc->lldd_task = ipr_cmd;
7036 spin_unlock(&hrrq->_lock);
7041 * ipr_qc_issue - Issue a SATA qc to a device
7042 * @qc: queued command
/*
 * libata .qc_issue hook: consumes the ipr_cmnd reserved by
 * ipr_qc_defer(), builds an ATA-passthrough IOARCB (taskfile image,
 * CDB, IOADL) and sends it.  The protocol switch sets DMA/packet flags;
 * unsupported protocols release the command and return AC_ERR_INVALID.
 * NOTE(review): break statements and a WARN/early-return are elided in
 * this excerpt (non-contiguous embedded line numbers).
 */
7047 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7049 struct ata_port *ap = qc->ap;
7050 struct ipr_sata_port *sata_port = ap->private_data;
7051 struct ipr_resource_entry *res = sata_port->res;
7052 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7053 struct ipr_cmnd *ipr_cmd;
7054 struct ipr_ioarcb *ioarcb;
7055 struct ipr_ioarcb_ata_regs *regs;
7057 if (qc->lldd_task == NULL)
7060 ipr_cmd = qc->lldd_task;
7061 if (ipr_cmd == NULL)
7062 return AC_ERR_SYSTEM;
7064 qc->lldd_task = NULL;
7065 spin_lock(&ipr_cmd->hrrq->_lock);
/* State may have changed since qc_defer; recheck before issuing. */
7066 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7067 ipr_cmd->hrrq->ioa_is_dead)) {
7068 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7069 spin_unlock(&ipr_cmd->hrrq->_lock);
7070 return AC_ERR_SYSTEM;
7073 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7074 ioarcb = &ipr_cmd->ioarcb;
/* ATA register image lives in different places on sis64 vs. sis32. */
7076 if (ioa_cfg->sis64) {
7077 regs = &ipr_cmd->i.ata_ioadl.regs;
7078 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7080 regs = &ioarcb->u.add_data.u.regs;
7082 memset(regs, 0, sizeof(*regs));
7083 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7085 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7087 ipr_cmd->done = ipr_sata_done;
7088 ipr_cmd->ioarcb.res_handle = res->res_handle;
7089 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7090 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7091 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7092 ipr_cmd->dma_use_sg = qc->n_elem;
7095 ipr_build_ata_ioadl64(ipr_cmd, qc);
7097 ipr_build_ata_ioadl(ipr_cmd, qc);
7099 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7100 ipr_copy_sata_tf(regs, &qc->tf);
7101 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7102 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7104 switch (qc->tf.protocol) {
7105 case ATA_PROT_NODATA:
7110 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7113 case ATAPI_PROT_PIO:
7114 case ATAPI_PROT_NODATA:
7115 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7118 case ATAPI_PROT_DMA:
7119 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7120 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
/* Unsupported protocol: release the command block and reject the qc. */
7125 spin_unlock(&ipr_cmd->hrrq->_lock);
7126 return AC_ERR_INVALID;
7129 ipr_send_command(ipr_cmd);
7130 spin_unlock(&ipr_cmd->hrrq->_lock);
7136 * ipr_qc_fill_rtf - Read result TF
7137 * @qc: ATA queued command
/*
 * Copy the ATA result taskfile out of the cached GATA IOASA into
 * qc->result_tf so libata can inspect command completion status.
 * NOTE(review): lines for lbal/lbam/lbah and the return are elided in
 * this extract.
 */
7142 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7144 	struct ipr_sata_port *sata_port = qc->ap->private_data;
7145 	struct ipr_ioasa_gata *g = &sata_port->ioasa;
7146 	struct ata_taskfile *tf = &qc->result_tf;
7148 	tf->feature = g->error;
7149 	tf->nsect = g->nsect;
7153 	tf->device = g->device;
7154 	tf->command = g->status;
7155 	tf->hob_nsect = g->hob_nsect;
7156 	tf->hob_lbal = g->hob_lbal;
7157 	tf->hob_lbam = g->hob_lbam;
7158 	tf->hob_lbah = g->hob_lbah;
/* libata port operations: wires the ipr SATA callbacks into libata. */
7163 static struct ata_port_operations ipr_sata_ops = {
7164 	.phy_reset = ipr_ata_phy_reset,
7165 	.hardreset = ipr_sata_reset,
7166 	.post_internal_cmd = ipr_ata_post_internal,
7167 	.qc_prep = ata_noop_qc_prep,
7168 	.qc_defer = ipr_qc_defer,
7169 	.qc_issue = ipr_qc_issue,
7170 	.qc_fill_rtf = ipr_qc_fill_rtf,
7171 	.port_start = ata_sas_port_start,
7172 	.port_stop = ata_sas_port_stop
/* Port capabilities advertised to libata (PIO4, MWDMA2, up to UDMA6). */
7175 static struct ata_port_info sata_port_info = {
7176 	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7178 	.pio_mask	= ATA_PIO4_ONLY,
7179 	.mwdma_mask	= ATA_MWDMA2,
7180 	.udma_mask	= ATA_UDMA6,
7181 	.port_ops	= &ipr_sata_ops
7184 #ifdef CONFIG_PPC_PSERIES
/* PVR values of processors the early Gemstone adapters misbehave on
 * (table contents elided in this extract). */
7185 static const u16 ipr_blocked_processors[] = {
7197  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7198  * @ioa_cfg:	ioa cfg struct
7200  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7201  * certain pSeries hardware. This function determines if the given
7202  * adapter is in one of these configurations or not.
7205  * 	1 if adapter is not supported / 0 if adapter is supported
/* Only type 0x5702 adapters below PCI revision 4 are screened against
 * the blocked-processor table. */
7207 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7211 	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7212 		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7213 			if (pvr_version_is(ipr_blocked_processors[i]))
/* Non-pSeries builds: every adapter is considered supported. */
7220 #define ipr_invalid_adapter(ioa_cfg) 0
7224 * ipr_ioa_bringdown_done - IOA bring down completion.
7225 * @ipr_cmd: ipr command struct
7227 * This function processes the completion of an adapter bring down.
7228 * It wakes any reset sleepers.
/*
 * Completion step for an adapter bring-down: unblocks the SCSI midlayer
 * (unless the IOA is being removed), marks every HRRQ dead, frees the
 * reset command block, and wakes anyone sleeping on reset_wait_q.
 */
7233 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7235 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7239 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7241 		ioa_cfg->scsi_unblock = 1;
7242 		schedule_work(&ioa_cfg->work_q);
7245 	ioa_cfg->in_reset_reload = 0;
7246 	ioa_cfg->reset_retries = 0;
/* Mark every response queue dead so no new commands are accepted. */
7247 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7248 		spin_lock(&ioa_cfg->hrrq[i]._lock);
7249 		ioa_cfg->hrrq[i].ioa_is_dead = 1;
7250 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
7254 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7255 	wake_up_all(&ioa_cfg->reset_wait_q);
7258 	return IPR_RC_JOB_RETURN;
7262 * ipr_ioa_reset_done - IOA reset completion.
7263 * @ipr_cmd: ipr command struct
7265 * This function processes the completion of an adapter reset.
7266 * It schedules any necessary mid-layer add/removes and
7267 * wakes any reset sleepers.
/*
 * Final step of a successful adapter reset: re-enables command delivery
 * on all HRRQs, schedules midlayer add/remove work for changed resources,
 * re-arms the HCAM (host-controlled async message) buffers, reports the
 * bus reset, and wakes reset sleepers.
 */
7272 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7274 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7275 	struct ipr_resource_entry *res;
7279 	ioa_cfg->in_reset_reload = 0;
7280 	for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7281 		spin_lock(&ioa_cfg->hrrq[j]._lock);
7282 		ioa_cfg->hrrq[j].allow_cmds = 1;
7283 		spin_unlock(&ioa_cfg->hrrq[j]._lock);
7286 	ioa_cfg->reset_cmd = NULL;
7287 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
/* Any resource flagged for midlayer add/remove triggers the worker. */
7289 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7290 		if (res->add_to_ml || res->del_from_ml) {
7295 	schedule_work(&ioa_cfg->work_q);
/* Re-post all HCAM buffers: log-data first, config-change for the rest. */
7297 	for (j = 0; j < IPR_NUM_HCAMS; j++) {
7298 		list_del_init(&ioa_cfg->hostrcb[j]->queue);
7299 		if (j < IPR_NUM_LOG_HCAMS)
7300 			ipr_send_hcam(ioa_cfg,
7301 				IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7302 				ioa_cfg->hostrcb[j]);
7304 			ipr_send_hcam(ioa_cfg,
7305 				IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7306 				ioa_cfg->hostrcb[j]);
7309 	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7310 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7312 	ioa_cfg->reset_retries = 0;
7313 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7314 	wake_up_all(&ioa_cfg->reset_wait_q);
7316 	ioa_cfg->scsi_unblock = 1;
7317 	schedule_work(&ioa_cfg->work_q);
7319 	return IPR_RC_JOB_RETURN;
7323 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7324 * @supported_dev: supported device struct
7325 * @vpids: vendor product id struct
/*
 * Initialize a Set Supported Device buffer: zero it, copy in the device's
 * vendor/product IDs, and fill in the single-record header fields.
 */
7330 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7331 				 struct ipr_std_inq_vpids *vpids)
7333 	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7334 	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7335 	supported_dev->num_records = 1;
7336 	supported_dev->data_length =
7337 		cpu_to_be16(sizeof(struct ipr_supported_device));
7338 	supported_dev->reserved = 0;
7342 * ipr_set_supported_devs - Send Set Supported Devices for a device
7343 * @ipr_cmd: ipr command struct
7345 * This function sends a Set Supported Devices to the adapter
7348 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/*
 * Reset-job step: walk used_res_q from ipr_cmd->u.res and send a Set
 * Supported Devices command for the next SCSI disk found, then re-arm
 * itself (on 32-bit SIS) to continue with the following disk.  When no
 * disk remains, falls through to ipr_ioa_reset_done.
 * NOTE(review): some lines (braces, the loop's continue) are elided here.
 */
7350 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7352 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7353 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7354 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7355 	struct ipr_resource_entry *res = ipr_cmd->u.res;
/* Default next step if no more disks need the command. */
7357 	ipr_cmd->job_step = ipr_ioa_reset_done;
7359 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7360 		if (!ipr_is_scsi_disk(res))
/* Remember where we are so the re-armed step resumes after this device. */
7363 		ipr_cmd->u.res = res;
7364 		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7366 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7367 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7368 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7370 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7371 		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7372 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7373 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7375 		ipr_init_ioadl(ipr_cmd,
7376 			       ioa_cfg->vpd_cbs_dma +
7377 				 offsetof(struct ipr_misc_cbs, supp_dev),
7378 			       sizeof(struct ipr_supported_device),
7379 			       IPR_IOADL_FLAGS_WRITE_LAST);
7381 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7382 			   IPR_SET_SUP_DEVICE_TIMEOUT);
/* 32-bit SIS adapters need the command repeated per device. */
7384 		if (!ioa_cfg->sis64)
7385 			ipr_cmd->job_step = ipr_set_supported_devs;
7387 		return IPR_RC_JOB_RETURN;
7391 	return IPR_RC_JOB_CONTINUE;
7395 * ipr_get_mode_page - Locate specified mode page
7396 * @mode_pages: mode page buffer
7397 * @page_code: page code to find
7398 * @len: minimum required length for mode page
7401 * pointer to mode page / NULL on failure
/*
 * Locate a mode page inside a MODE SENSE reply buffer.  Walks the page
 * headers following the block descriptors and returns the first page
 * whose code matches and whose length satisfies the caller's minimum;
 * returns NULL if the buffer is empty or the page is absent.
 * NOTE(review): the loop header and the matching return are elided in
 * this extract.
 */
7403 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7404 			       u32 page_code, u32 len)
7406 	struct ipr_mode_page_hdr *mode_hdr;
7410 	if (!mode_pages || (mode_pages->hdr.length == 0))
/* Total page data = (reported length + 1) minus header and block descriptors. */
7413 	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7414 	mode_hdr = (struct ipr_mode_page_hdr *)
7415 		(mode_pages->data + mode_pages->hdr.block_desc_len);
7418 		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7419 			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
/* Not a match: advance to the next page header. */
7423 			page_length = (sizeof(struct ipr_mode_page_hdr) +
7424 				       mode_hdr->page_length);
7425 			length -= page_length;
7426 			mode_hdr = (struct ipr_mode_page_hdr *)
7427 				((unsigned long)mode_hdr + page_length);
7434 * ipr_check_term_power - Check for term power errors
7435 * @ioa_cfg: ioa config struct
7436 * @mode_pages: IOAFP mode pages buffer
7438 * Check the IOAFP's mode page 28 for term power errors
/*
 * Scan the IOAFP's mode page 28 bus entries and log an error for every
 * SCSI bus reporting absent termination power.
 */
7443 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7444 				 struct ipr_mode_pages *mode_pages)
7448 	struct ipr_dev_bus_entry *bus;
7449 	struct ipr_mode_page28 *mode_page;
7451 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
7452 				      sizeof(struct ipr_mode_page28));
7454 	entry_length = mode_page->entry_length;
7456 	bus = mode_page->bus;
7458 	for (i = 0; i < mode_page->num_entries; i++) {
7459 		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7460 			dev_err(&ioa_cfg->pdev->dev,
7461 				"Term power is absent on scsi bus %d\n",
/* Entries are variable-sized: step by the adapter-reported entry_length. */
7465 		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7470 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7471 * @ioa_cfg: ioa config struct
7473 * Looks through the config table checking for SES devices. If
7474 * the SES device is in the SES table indicating a maximum SCSI
7475 * bus speed, the speed is limited for the bus.
/*
 * Clamp each SCSI bus's max transfer rate to the SES-table limit for the
 * enclosure found on that bus, if it is lower than the current setting.
 */
7480 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7485 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7486 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7487 						       ioa_cfg->bus_attr[i].bus_width);
7489 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7490 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7495 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7496 * @ioa_cfg: ioa config struct
7497 * @mode_pages: mode page 28 buffer
7499 * Updates mode page 28 based on driver configuration
/*
 * Rewrite the bus entries of mode page 28 from the driver's configured
 * bus attributes (reset delay, width, transfer rate, QAS flag) before the
 * page is sent back via MODE SELECT.  Entries with an out-of-range bus
 * number are reported and skipped.
 */
7504 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7505 					  	struct ipr_mode_pages *mode_pages)
7507 	int i, entry_length;
7508 	struct ipr_dev_bus_entry *bus;
7509 	struct ipr_bus_attributes *bus_attr;
7510 	struct ipr_mode_page28 *mode_page;
7512 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
7513 				      sizeof(struct ipr_mode_page28));
7515 	entry_length = mode_page->entry_length;
7517 	/* Loop for each device bus entry */
7518 	for (i = 0, bus = mode_page->bus;
7519 	     i < mode_page->num_entries;
7520 	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7521 		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7522 			dev_err(&ioa_cfg->pdev->dev,
7523 				"Invalid resource address reported: 0x%08X\n",
7524 				IPR_GET_PHYS_LOC(bus->res_addr));
/* Copy the driver's configured attributes into this bus entry. */
7528 		bus_attr = &ioa_cfg->bus_attr[i];
7529 		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7530 		bus->bus_width = bus_attr->bus_width;
7531 		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7532 		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7533 		if (bus_attr->qas_enabled)
7534 			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7536 			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7541 * ipr_build_mode_select - Build a mode select command
7542 * @ipr_cmd: ipr command struct
7543 * @res_handle: resource handle to send command to
7544 * @parm: Byte 2 of Mode Sense command
7545 * @dma_addr: DMA buffer address
7546 * @xfer_len: data transfer length
/*
 * Fill in the IOARCB for a MODE SELECT SCSI CDB (write direction) and
 * attach a single-element write IOADL covering the DMA buffer.
 */
7551 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7552 				  __be32 res_handle, u8 parm,
7553 				  dma_addr_t dma_addr, u8 xfer_len)
7555 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7557 	ioarcb->res_handle = res_handle;
7558 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7559 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7560 	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7561 	ioarcb->cmd_pkt.cdb[1] = parm;
7562 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
7564 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7568 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7569 * @ipr_cmd: ipr command struct
7571 * This function sets up the SCSI bus attributes and sends
7572 * a Mode Select for Page 28 to activate them.
/*
 * Reset-job step: apply the driver's SCSI bus settings by editing the
 * previously fetched mode page 28 and issuing a MODE SELECT to the IOA.
 * The header length byte is zeroed before sending, per MODE SELECT rules.
 */
7577 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7579 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7580 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7584 	ipr_scsi_bus_speed_limit(ioa_cfg);
7585 	ipr_check_term_power(ioa_cfg, mode_pages);
7586 	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
/* Transfer length is taken before the header length is cleared. */
7587 	length = mode_pages->hdr.length + 1;
7588 	mode_pages->hdr.length = 0;
7590 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7591 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
/* Next step iterates Set Supported Devices starting at the head of used_res_q. */
7594 	ipr_cmd->job_step = ipr_set_supported_devs;
7595 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7596 				    struct ipr_resource_entry, queue);
7597 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7600 	return IPR_RC_JOB_RETURN;
7604 * ipr_build_mode_sense - Builds a mode sense command
7605 * @ipr_cmd: ipr command struct
7606 * @res: resource entry struct
7607 * @parm: Byte 2 of mode sense command
7608 * @dma_addr: DMA address of mode sense buffer
7609 * @xfer_len: Size of DMA buffer
/*
 * Fill in the IOARCB for a MODE SENSE SCSI CDB (read direction) and
 * attach a single-element read IOADL covering the DMA buffer.
 */
7614 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7616 				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7618 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7620 	ioarcb->res_handle = res_handle;
7621 	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7622 	ioarcb->cmd_pkt.cdb[2] = parm;
7623 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
7624 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7626 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7630 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7631 * @ipr_cmd: ipr command struct
7633 * This function handles the failure of an IOA bringup command.
/*
 * Generic failure handler for an IOA bringup command: log the failing
 * opcode and IOASC, kick off a fresh adapter reset, and free the command.
 */
7638 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7640 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7641 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7643 	dev_err(&ioa_cfg->pdev->dev,
7644 		"0x%02X failed with IOASC: 0x%08X\n",
7645 		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7647 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7648 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7649 	return IPR_RC_JOB_RETURN;
7653 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7654 * @ipr_cmd: ipr command struct
7656 * This function handles the failure of a Mode Sense to the IOAFP.
7657 * Some adapters do not handle all mode pages.
7660 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/*
 * If the page 28 MODE SENSE is simply unsupported by this adapter, skip
 * ahead to Set Supported Devices; any other IOASC is a real failure.
 */
7662 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7664 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7665 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7667 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7668 		ipr_cmd->job_step = ipr_set_supported_devs;
7669 		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7670 					    struct ipr_resource_entry, queue);
7671 		return IPR_RC_JOB_CONTINUE;
7674 	return ipr_reset_cmd_failed(ipr_cmd);
7678 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7679 * @ipr_cmd: ipr command struct
7681  * This function sends a Page 28 mode sense to the IOA to
7682 * retrieve SCSI bus attributes.
/*
 * Reset-job step: fetch mode page 28 (SCSI bus attributes) into the
 * shared VPD control-block buffer, then continue with MODE SELECT.
 */
7687 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7689 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7692 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7693 			     0x28, ioa_cfg->vpd_cbs_dma +
7694 			     offsetof(struct ipr_misc_cbs, mode_pages),
7695 			     sizeof(struct ipr_mode_pages));
7697 	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7698 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7700 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7703 	return IPR_RC_JOB_RETURN;
7707 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7708 * @ipr_cmd: ipr command struct
7710 * This function enables dual IOA RAID support if possible.
/*
 * Reset-job step: set the dual-IOA-AF enable bit in mode page 24 and
 * MODE SELECT it back to the adapter, then proceed to the page 28 sense.
 * NOTE(review): lines handling an absent page 24 are elided here.
 */
7715 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7717 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7718 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7719 	struct ipr_mode_page24 *mode_page;
7723 	mode_page = ipr_get_mode_page(mode_pages, 0x24,
7724 				      sizeof(struct ipr_mode_page24));
7727 		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
/* Transfer length taken before the header length byte is cleared. */
7729 	length = mode_pages->hdr.length + 1;
7730 	mode_pages->hdr.length = 0;
7732 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7733 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7736 	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7737 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7740 	return IPR_RC_JOB_RETURN;
7744 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7745 * @ipr_cmd: ipr command struct
7747 * This function handles the failure of a Mode Sense to the IOAFP.
7748 * Some adapters do not handle all mode pages.
7751 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/*
 * If the page 24 MODE SENSE is unsupported, skip straight to the page 28
 * sense; any other IOASC is treated as a real failure.
 */
7753 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7755 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7757 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7758 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7759 		return IPR_RC_JOB_CONTINUE;
7762 	return ipr_reset_cmd_failed(ipr_cmd);
7766 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7767 * @ipr_cmd: ipr command struct
7769  * This function sends a mode sense to the IOA to retrieve
7770 * the IOA Advanced Function Control mode page.
/*
 * Reset-job step: fetch mode page 24 (IOA Advanced Function Control)
 * into the shared VPD buffer, then continue with the page 24 select.
 */
7775 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7777 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7780 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7781 			     0x24, ioa_cfg->vpd_cbs_dma +
7782 			     offsetof(struct ipr_misc_cbs, mode_pages),
7783 			     sizeof(struct ipr_mode_pages));
7785 	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7786 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7788 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7791 	return IPR_RC_JOB_RETURN;
7795 * ipr_init_res_table - Initialize the resource table
7796 * @ipr_cmd: ipr command struct
7798 * This function looks through the existing resource table, comparing
7799 * it with the config table. This function will take care of old/new
7800 * devices and schedule adding/removing them from the mid-layer
7804 * IPR_RC_JOB_CONTINUE
/*
 * Reconcile the driver's resource list with the adapter's freshly
 * fetched config table: move all current resources onto a temporary
 * list, re-match them against config-table entries (allocating new
 * entries as needed), then flag leftovers for midlayer removal or
 * return them to the free pool.  Picks the next reset-job step based on
 * dual-RAID support.
 * NOTE(review): several lines (braces, found-flag handling, add_to_ml
 * assignment) are elided in this extract.
 */
7806 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7808 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7809 	struct ipr_resource_entry *res, *temp;
7810 	struct ipr_config_table_entry_wrapper cfgtew;
7811 	int entries, found, flag, i;
/* 64-bit and 32-bit SIS use different config-table header layouts. */
7816 		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7818 		flag = ioa_cfg->u.cfg_table->hdr.flags;
7820 	if (flag & IPR_UCODE_DOWNLOAD_REQ)
7821 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
/* Park every known resource on old_res; survivors are moved back below. */
7823 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7824 		list_move_tail(&res->queue, &old_res);
7827 		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7829 		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7831 	for (i = 0; i < entries; i++) {
7833 			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7835 			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
/* Try to re-match this config entry with a pre-existing resource. */
7838 		list_for_each_entry_safe(res, temp, &old_res, queue) {
7839 			if (ipr_is_same_device(res, &cfgtew)) {
7840 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7847 			if (list_empty(&ioa_cfg->free_res_q)) {
7848 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
/* New device: take an entry from the free pool and initialize it. */
7853 			res = list_entry(ioa_cfg->free_res_q.next,
7854 					 struct ipr_resource_entry, queue);
7855 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7856 			ipr_init_res_entry(res, &cfgtew);
7858 		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7859 			res->sdev->allow_restart = 1;
7862 			ipr_update_res_entry(res, &cfgtew);
/* Resources not found in the new table: schedule midlayer removal... */
7865 	list_for_each_entry_safe(res, temp, &old_res, queue) {
7867 			res->del_from_ml = 1;
7868 			res->res_handle = IPR_INVALID_RES_HANDLE;
7869 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
/* ...or, with no sdev attached, return them straight to the free pool. */
7873 	list_for_each_entry_safe(res, temp, &old_res, queue) {
7874 		ipr_clear_res_target(res);
7875 		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7878 	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7879 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7881 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7884 	return IPR_RC_JOB_CONTINUE;
7888 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7889 * @ipr_cmd: ipr command struct
7891 * This function sends a Query IOA Configuration command
7892 * to the adapter to retrieve the IOA configuration table.
/*
 * Reset-job step: record dual-RAID capability, print the firmware
 * version, and send Query IOA Configuration so the adapter DMA-writes
 * its config table; ipr_init_res_table processes the result.
 */
7897 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7899 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7900 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7901 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7902 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7905 	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7906 		ioa_cfg->dual_raid = 1;
7907 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7908 		 ucode_vpd->major_release, ucode_vpd->card_type,
7909 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7910 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7911 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
/* CDB bytes 6-8 carry the 24-bit allocation length, big-endian. */
7913 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7914 	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7915 	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7916 	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7918 	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7919 		       IPR_IOADL_FLAGS_READ_LAST);
7921 	ipr_cmd->job_step = ipr_init_res_table;
7923 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7926 	return IPR_RC_JOB_RETURN;
/*
 * Failure handler for the IOA Service Action: an "invalid request"
 * IOASC just means the feature is unsupported, so continue the job;
 * anything else is a genuine failure.
 */
7929 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7931 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7933 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7934 		return IPR_RC_JOB_CONTINUE;
7936 	return ipr_reset_cmd_failed(ipr_cmd);
/* Fill in the IOARCB for an IOA Service Action command with the given
 * resource handle and service-action code. */
7939 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7940 					 __be32 res_handle, u8 sa_code)
7942 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7944 	ioarcb->res_handle = res_handle;
7945 	ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7946 	ioarcb->cmd_pkt.cdb[1] = sa_code;
7947 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7951 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
/*
 * Reset-job step: if page C4 advertised sync-cache capability, issue a
 * Change Cache Parameters service action (CDB byte 2 = 0x40 selects the
 * mode — presumably the sync-on-shutdown setting; TODO confirm against
 * the adapter spec).  Otherwise fall straight through to the IOA config
 * query.
 */
7957 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7959 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7960 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7961 	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7965 	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7967 	if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7968 		ipr_build_ioa_service_action(ipr_cmd,
7969 					     cpu_to_be32(IPR_IOA_RES_HANDLE),
7970 					     IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7972 		ioarcb->cmd_pkt.cdb[2] = 0x40;
7974 		ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7975 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7976 			   IPR_SET_SUP_DEVICE_TIMEOUT);
7979 		return IPR_RC_JOB_RETURN;
7983 	return IPR_RC_JOB_CONTINUE;
7987 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7988 * @ipr_cmd: ipr command struct
7990 * This utility function sends an inquiry to the adapter.
/*
 * Helper used by the reset job to send an INQUIRY (standard or EVPD,
 * depending on @flags/@page) to the IOA and kick the reset state machine
 * on completion.
 */
7995 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7996 			      dma_addr_t dma_addr, u8 xfer_len)
7998 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8001 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8002 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8004 	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
8005 	ioarcb->cmd_pkt.cdb[1] = flags;
8006 	ioarcb->cmd_pkt.cdb[2] = page;
8007 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
8009 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
8011 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
8016 * ipr_inquiry_page_supported - Is the given inquiry page supported
8017 * @page0: inquiry page 0 buffer
8020 * This function determines if the specified inquiry page is supported.
8023 * 1 if page is supported / 0 if not
/* Scan the page 0 supported-pages list for @page; the min_t bound guards
 * against a corrupt length byte overrunning the fixed-size array. */
8025 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8029 	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8030 		if (page0->page[i] == page)
8037 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8038 * @ipr_cmd: ipr command struct
8040 * This function sends a Page 0xC4 inquiry to the adapter
8041 * to retrieve software VPD information.
8044 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/*
 * Reset-job step: fetch inquiry page 0xC4 (cache capabilities) when the
 * adapter advertises it; otherwise skip straight to the caching step
 * with a zeroed pageC4 buffer.
 */
8046 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8048 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8049 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8050 	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8053 	ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8054 	memset(pageC4, 0, sizeof(*pageC4));
8056 	if (ipr_inquiry_page_supported(page0, 0xC4)) {
8057 		ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8058 				  (ioa_cfg->vpd_cbs_dma
8059 				   + offsetof(struct ipr_misc_cbs,
8061 				  sizeof(struct ipr_inquiry_pageC4));
8062 		return IPR_RC_JOB_RETURN;
8066 	return IPR_RC_JOB_CONTINUE;
8070 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8071 * @ipr_cmd: ipr command struct
8073 * This function sends a Page 0xD0 inquiry to the adapter
8074 * to retrieve adapter capabilities.
8077 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/*
 * Reset-job step: fetch inquiry page 0xD0 (adapter capabilities) when
 * supported; otherwise continue with a zeroed cap buffer.
 */
8079 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8081 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8082 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8083 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8086 	ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8087 	memset(cap, 0, sizeof(*cap));
8089 	if (ipr_inquiry_page_supported(page0, 0xD0)) {
8090 		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8091 				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8092 				  sizeof(struct ipr_inquiry_cap));
8093 		return IPR_RC_JOB_RETURN;
8097 	return IPR_RC_JOB_CONTINUE;
8101 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8102 * @ipr_cmd: ipr command struct
8104 * This function sends a Page 3 inquiry to the adapter
8105 * to retrieve software VPD information.
8108 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/*
 * Reset-job step: fetch inquiry page 3 (microcode VPD), then move on to
 * the capabilities inquiry.
 */
8110 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8112 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8116 	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8118 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8119 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8120 			  sizeof(struct ipr_inquiry_page3));
8123 	return IPR_RC_JOB_RETURN;
8127 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8128 * @ipr_cmd: ipr command struct
8130 * This function sends a Page 0 inquiry to the adapter
8131 * to retrieve supported inquiry pages.
8134 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/*
 * Reset-job step: extract the adapter type from the standard-inquiry
 * product ID, abort the bringup if this adapter/host combination is
 * known-bad (unless ipr_testmode overrides), then fetch inquiry page 0
 * to learn which EVPD pages the adapter supports.
 */
8136 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8138 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8143 	/* Grab the type out of the VPD and store it away */
8144 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
/* Product ID encodes the adapter type as four hex characters. */
8146 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8148 	if (ipr_invalid_adapter(ioa_cfg)) {
8149 		dev_err(&ioa_cfg->pdev->dev,
8150 			"Adapter not supported in this hardware configuration.\n");
8152 		if (!ipr_testmode) {
8153 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8154 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8155 			list_add_tail(&ipr_cmd->queue,
8156 					&ioa_cfg->hrrq->hrrq_free_q);
8157 			return IPR_RC_JOB_RETURN;
8161 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8163 	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8164 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8165 			  sizeof(struct ipr_inquiry_page0));
8168 	return IPR_RC_JOB_RETURN;
8172 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8173 * @ipr_cmd: ipr command struct
8175 * This function sends a standard inquiry to the adapter.
/*
 * Reset-job step: fetch the standard (non-EVPD) inquiry data into the
 * shared VPD buffer, then continue with the page 0 inquiry.
 */
8180 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8182 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8185 	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8187 	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8188 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8189 			  sizeof(struct ipr_ioa_vpd));
8192 	return IPR_RC_JOB_RETURN;
8196 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8197 * @ipr_cmd: ipr command struct
8199  * This function sends an Identify Host Request Response Queue
8200 * command to establish the HRRQ with the adapter.
/*
 * Reset-job step: send one Identify Host RRQ command per response queue,
 * encoding the queue's DMA address (low 32 bits in CDB 2-5, high 32 bits
 * in CDB 10-13 for sis64), its size in CDB 7-8, and — when multiple
 * queues are in use — the queue index in CDB 9 (or 14 on sis64).  The
 * step re-arms itself until every HRRQ has been identified, then falls
 * through to the standard inquiry.
 * NOTE(review): closing braces and some intermediate lines are elided in
 * this extract.
 */
8205 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8207 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8208 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8209 	struct ipr_hrr_queue *hrrq;
8212 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8213 	if (ioa_cfg->identify_hrrq_index == 0)
8214 		dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8216 	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8217 		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8219 		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8220 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8222 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8224 			ioarcb->cmd_pkt.cdb[1] = 0x1;
/* Queue-selection mode only applies when more than one vector is used. */
8226 		if (ioa_cfg->nvectors == 1)
8227 			ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8229 			ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
/* Low 32 bits of the queue DMA address, big-endian across CDB 2-5. */
8231 		ioarcb->cmd_pkt.cdb[2] =
8232 			((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8233 		ioarcb->cmd_pkt.cdb[3] =
8234 			((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8235 		ioarcb->cmd_pkt.cdb[4] =
8236 			((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8237 		ioarcb->cmd_pkt.cdb[5] =
8238 			((u64) hrrq->host_rrq_dma) & 0xff;
8239 		ioarcb->cmd_pkt.cdb[7] =
8240 			((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8241 		ioarcb->cmd_pkt.cdb[8] =
8242 			(sizeof(u32) * hrrq->size) & 0xff;
8244 		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8245 			ioarcb->cmd_pkt.cdb[9] =
8246 					ioa_cfg->identify_hrrq_index;
/* 64-bit SIS also carries the upper 32 address bits in CDB 10-13. */
8248 		if (ioa_cfg->sis64) {
8249 			ioarcb->cmd_pkt.cdb[10] =
8250 				((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8251 			ioarcb->cmd_pkt.cdb[11] =
8252 				((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8253 			ioarcb->cmd_pkt.cdb[12] =
8254 				((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8255 			ioarcb->cmd_pkt.cdb[13] =
8256 				((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8259 		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8260 			ioarcb->cmd_pkt.cdb[14] =
8261 					ioa_cfg->identify_hrrq_index;
8263 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8264 			   IPR_INTERNAL_TIMEOUT);
/* More queues remain: run this step again for the next HRRQ. */
8266 		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8267 			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8270 		return IPR_RC_JOB_RETURN;
8274 	return IPR_RC_JOB_CONTINUE;
8278 * ipr_reset_timer_done - Adapter reset timer function
8279 * @ipr_cmd: ipr command struct
8281 * Description: This function is used in adapter reset processing
8282 * for timing events. If the reset_cmd pointer in the IOA
8283 * config struct is not this adapter's we are doing nested
8284 * resets and fail_all_ops will take care of freeing the
/*
 * Timer callback for reset timing events.  Under the host lock, only
 * completes the command if it is still the active reset_cmd — otherwise
 * a nested reset has superseded it and fail_all_ops owns the cleanup.
 */
8290 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8292 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8293 	unsigned long lock_flags = 0;
8295 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8297 	if (ioa_cfg->reset_cmd == ipr_cmd) {
8298 		list_del(&ipr_cmd->queue);
8299 		ipr_cmd->done(ipr_cmd);
8302 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8306 * ipr_reset_start_timer - Start a timer for adapter reset job
8307 * @ipr_cmd: ipr command struct
8308 * @timeout: timeout value
8310 * Description: This function is used in adapter reset processing
8311 * for timing events. If the reset_cmd pointer in the IOA
8312 * config struct is not this adapter's we are doing nested
8313 * resets and fail_all_ops will take care of freeing the
8319 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8320 unsigned long timeout)
8324 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8325 ipr_cmd->done = ipr_reset_ioa_job;
8327 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8328 ipr_cmd->timer.expires = jiffies + timeout;
8329 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8330 add_timer(&ipr_cmd->timer);
8334 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8335 * @ioa_cfg: ioa cfg struct
8340 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8342 struct ipr_hrr_queue *hrrq;
8344 for_each_hrrq(hrrq, ioa_cfg) {
8345 spin_lock(&hrrq->_lock);
8346 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8348 /* Initialize Host RRQ pointers */
8349 hrrq->hrrq_start = hrrq->host_rrq;
8350 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8351 hrrq->hrrq_curr = hrrq->hrrq_start;
8352 hrrq->toggle_bit = 1;
8353 spin_unlock(&hrrq->_lock);
8357 ioa_cfg->identify_hrrq_index = 0;
8358 if (ioa_cfg->hrrq_num == 1)
8359 atomic_set(&ioa_cfg->hrrq_index, 0);
8361 atomic_set(&ioa_cfg->hrrq_index, 1);
8363 /* Zero out config table */
8364 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8368 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8369 * @ipr_cmd: ipr command struct
8372 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8374 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8376 unsigned long stage, stage_time;
8378 volatile u32 int_reg;
8379 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8382 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8383 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8384 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8386 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8388 /* sanity check the stage_time value */
8389 if (stage_time == 0)
8390 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8391 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8392 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8393 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8394 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8396 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8397 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8398 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8399 stage_time = ioa_cfg->transop_timeout;
8400 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8401 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8402 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8403 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8404 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8405 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8406 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8407 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8408 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8409 return IPR_RC_JOB_CONTINUE;
8413 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8414 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8415 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8416 ipr_cmd->done = ipr_reset_ioa_job;
8417 add_timer(&ipr_cmd->timer);
8419 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8421 return IPR_RC_JOB_RETURN;
8425 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8426 * @ipr_cmd: ipr command struct
8428 * This function reinitializes some control blocks and
8429 * enables destructive diagnostics on the adapter.
8434 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8436 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8437 volatile u32 int_reg;
8438 volatile u64 maskval;
8442 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8443 ipr_init_ioa_mem(ioa_cfg);
8445 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8446 spin_lock(&ioa_cfg->hrrq[i]._lock);
8447 ioa_cfg->hrrq[i].allow_interrupts = 1;
8448 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8451 if (ioa_cfg->sis64) {
8452 /* Set the adapter to the correct endian mode. */
8453 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8454 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8457 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8459 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8460 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8461 ioa_cfg->regs.clr_interrupt_mask_reg32);
8462 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8463 return IPR_RC_JOB_CONTINUE;
8466 /* Enable destructive diagnostics on IOA */
8467 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8469 if (ioa_cfg->sis64) {
8470 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8471 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8472 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8474 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8476 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8478 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8480 if (ioa_cfg->sis64) {
8481 ipr_cmd->job_step = ipr_reset_next_stage;
8482 return IPR_RC_JOB_CONTINUE;
8485 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8486 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8487 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8488 ipr_cmd->done = ipr_reset_ioa_job;
8489 add_timer(&ipr_cmd->timer);
8490 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8493 return IPR_RC_JOB_RETURN;
8497 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8498 * @ipr_cmd: ipr command struct
8500 * This function is invoked when an adapter dump has run out
8501 * of processing time.
8504 * IPR_RC_JOB_CONTINUE
8506 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8508 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8510 if (ioa_cfg->sdt_state == GET_DUMP)
8511 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8512 else if (ioa_cfg->sdt_state == READ_DUMP)
8513 ioa_cfg->sdt_state = ABORT_DUMP;
8515 ioa_cfg->dump_timeout = 1;
8516 ipr_cmd->job_step = ipr_reset_alert;
8518 return IPR_RC_JOB_CONTINUE;
8522 * ipr_unit_check_no_data - Log a unit check/no data error log
8523 * @ioa_cfg: ioa config struct
8525 * Logs an error indicating the adapter unit checked, but for some
8526 * reason, we were unable to fetch the unit check buffer.
8531 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8533 ioa_cfg->errors_logged++;
8534 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8538 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8539 * @ioa_cfg: ioa config struct
8541 * Fetches the unit check buffer from the adapter by clocking the data
8542 * through the mailbox register.
8547 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8549 unsigned long mailbox;
8550 struct ipr_hostrcb *hostrcb;
8551 struct ipr_uc_sdt sdt;
8555 mailbox = readl(ioa_cfg->ioa_mailbox);
8557 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8558 ipr_unit_check_no_data(ioa_cfg);
8562 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8563 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8564 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8566 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8567 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8568 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8569 ipr_unit_check_no_data(ioa_cfg);
8573 /* Find length of the first sdt entry (UC buffer) */
8574 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8575 length = be32_to_cpu(sdt.entry[0].end_token);
8577 length = (be32_to_cpu(sdt.entry[0].end_token) -
8578 be32_to_cpu(sdt.entry[0].start_token)) &
8579 IPR_FMT2_MBX_ADDR_MASK;
8581 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8582 struct ipr_hostrcb, queue);
8583 list_del_init(&hostrcb->queue);
8584 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8586 rc = ipr_get_ldump_data_section(ioa_cfg,
8587 be32_to_cpu(sdt.entry[0].start_token),
8588 (__be32 *)&hostrcb->hcam,
8589 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8592 ipr_handle_log_data(ioa_cfg, hostrcb);
8593 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8594 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8595 ioa_cfg->sdt_state == GET_DUMP)
8596 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8598 ipr_unit_check_no_data(ioa_cfg);
8600 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8604 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8605 * @ipr_cmd: ipr command struct
8607 * Description: This function will call to get the unit check buffer.
8612 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8614 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8617 ioa_cfg->ioa_unit_checked = 0;
8618 ipr_get_unit_check_buffer(ioa_cfg);
8619 ipr_cmd->job_step = ipr_reset_alert;
8620 ipr_reset_start_timer(ipr_cmd, 0);
8623 return IPR_RC_JOB_RETURN;
8626 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8628 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8632 if (ioa_cfg->sdt_state != GET_DUMP)
8633 return IPR_RC_JOB_RETURN;
8635 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8636 (readl(ioa_cfg->regs.sense_interrupt_reg) &
8637 IPR_PCII_MAILBOX_STABLE)) {
8639 if (!ipr_cmd->u.time_left)
8640 dev_err(&ioa_cfg->pdev->dev,
8641 "Timed out waiting for Mailbox register.\n");
8643 ioa_cfg->sdt_state = READ_DUMP;
8644 ioa_cfg->dump_timeout = 0;
8646 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8648 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8649 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8650 schedule_work(&ioa_cfg->work_q);
8653 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8654 ipr_reset_start_timer(ipr_cmd,
8655 IPR_CHECK_FOR_RESET_TIMEOUT);
8659 return IPR_RC_JOB_RETURN;
8663 * ipr_reset_restore_cfg_space - Restore PCI config space.
8664 * @ipr_cmd: ipr command struct
8666 * Description: This function restores the saved PCI config space of
8667 * the adapter, fails all outstanding ops back to the callers, and
8668 * fetches the dump/unit check if applicable to this reset.
8671 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8673 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8675 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8679 ioa_cfg->pdev->state_saved = true;
8680 pci_restore_state(ioa_cfg->pdev);
8682 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8683 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8684 return IPR_RC_JOB_CONTINUE;
8687 ipr_fail_all_ops(ioa_cfg);
8689 if (ioa_cfg->sis64) {
8690 /* Set the adapter to the correct endian mode. */
8691 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8692 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8695 if (ioa_cfg->ioa_unit_checked) {
8696 if (ioa_cfg->sis64) {
8697 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8698 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8699 return IPR_RC_JOB_RETURN;
8701 ioa_cfg->ioa_unit_checked = 0;
8702 ipr_get_unit_check_buffer(ioa_cfg);
8703 ipr_cmd->job_step = ipr_reset_alert;
8704 ipr_reset_start_timer(ipr_cmd, 0);
8705 return IPR_RC_JOB_RETURN;
8709 if (ioa_cfg->in_ioa_bringdown) {
8710 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8711 } else if (ioa_cfg->sdt_state == GET_DUMP) {
8712 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8713 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8715 ipr_cmd->job_step = ipr_reset_enable_ioa;
8719 return IPR_RC_JOB_CONTINUE;
8723 * ipr_reset_bist_done - BIST has completed on the adapter.
8724 * @ipr_cmd: ipr command struct
8726 * Description: Unblock config space and resume the reset process.
8729 * IPR_RC_JOB_CONTINUE
8731 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8733 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8736 if (ioa_cfg->cfg_locked)
8737 pci_cfg_access_unlock(ioa_cfg->pdev);
8738 ioa_cfg->cfg_locked = 0;
8739 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8741 return IPR_RC_JOB_CONTINUE;
8745 * ipr_reset_start_bist - Run BIST on the adapter.
8746 * @ipr_cmd: ipr command struct
8748 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8751 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8753 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8755 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8756 int rc = PCIBIOS_SUCCESSFUL;
8759 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8760 writel(IPR_UPROCI_SIS64_START_BIST,
8761 ioa_cfg->regs.set_uproc_interrupt_reg32);
8763 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8765 if (rc == PCIBIOS_SUCCESSFUL) {
8766 ipr_cmd->job_step = ipr_reset_bist_done;
8767 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8768 rc = IPR_RC_JOB_RETURN;
8770 if (ioa_cfg->cfg_locked)
8771 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8772 ioa_cfg->cfg_locked = 0;
8773 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8774 rc = IPR_RC_JOB_CONTINUE;
8782 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8783 * @ipr_cmd: ipr command struct
8785 * Description: This clears PCI reset to the adapter and delays two seconds.
8790 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8793 ipr_cmd->job_step = ipr_reset_bist_done;
8794 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8796 return IPR_RC_JOB_RETURN;
8800 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8801 * @work: work struct
8803 * Description: This pulses warm reset to a slot.
8806 static void ipr_reset_reset_work(struct work_struct *work)
8808 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8809 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8810 struct pci_dev *pdev = ioa_cfg->pdev;
8811 unsigned long lock_flags = 0;
8814 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8815 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8816 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8818 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8819 if (ioa_cfg->reset_cmd == ipr_cmd)
8820 ipr_reset_ioa_job(ipr_cmd);
8821 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8826 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8827 * @ipr_cmd: ipr command struct
8829 * Description: This asserts PCI reset to the adapter.
8834 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8836 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8839 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8840 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8841 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8843 return IPR_RC_JOB_RETURN;
8847 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8848 * @ipr_cmd: ipr command struct
8850 * Description: This attempts to block config access to the IOA.
8853 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8855 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8857 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8858 int rc = IPR_RC_JOB_CONTINUE;
8860 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8861 ioa_cfg->cfg_locked = 1;
8862 ipr_cmd->job_step = ioa_cfg->reset;
8864 if (ipr_cmd->u.time_left) {
8865 rc = IPR_RC_JOB_RETURN;
8866 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8867 ipr_reset_start_timer(ipr_cmd,
8868 IPR_CHECK_FOR_RESET_TIMEOUT);
8870 ipr_cmd->job_step = ioa_cfg->reset;
8871 dev_err(&ioa_cfg->pdev->dev,
8872 "Timed out waiting to lock config access. Resetting anyway.\n");
8880 * ipr_reset_block_config_access - Block config access to the IOA
8881 * @ipr_cmd: ipr command struct
8883 * Description: This attempts to block config access to the IOA
8886 * IPR_RC_JOB_CONTINUE
8888 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8890 ipr_cmd->ioa_cfg->cfg_locked = 0;
8891 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8892 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8893 return IPR_RC_JOB_CONTINUE;
8897 * ipr_reset_allowed - Query whether or not IOA can be reset
8898 * @ioa_cfg: ioa config struct
8901 * 0 if reset not allowed / non-zero if reset is allowed
8903 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8905 volatile u32 temp_reg;
8907 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8908 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8912 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8913 * @ipr_cmd: ipr command struct
8915 * Description: This function waits for adapter permission to run BIST,
8916 * then runs BIST. If the adapter does not give permission after a
8917 * reasonable time, we will reset the adapter anyway. The impact of
8918 * resetting the adapter without warning the adapter is the risk of
8919 * losing the persistent error log on the adapter. If the adapter is
8920 * reset while it is writing to the flash on the adapter, the flash
8921 * segment will have bad ECC and be zeroed.
8924 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8926 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8928 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8929 int rc = IPR_RC_JOB_RETURN;
8931 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8932 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8933 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8935 ipr_cmd->job_step = ipr_reset_block_config_access;
8936 rc = IPR_RC_JOB_CONTINUE;
8943 * ipr_reset_alert - Alert the adapter of a pending reset
8944 * @ipr_cmd: ipr command struct
8946 * Description: This function alerts the adapter that it will be reset.
8947 * If memory space is not currently enabled, proceed directly
8948 * to running BIST on the adapter. The timer must always be started
8949 * so we guarantee we do not run BIST from ipr_isr.
8954 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8956 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8961 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8963 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8964 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8965 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8966 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8968 ipr_cmd->job_step = ipr_reset_block_config_access;
8971 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8972 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8975 return IPR_RC_JOB_RETURN;
8979 * ipr_reset_quiesce_done - Complete IOA disconnect
8980 * @ipr_cmd: ipr command struct
8982 * Description: Freeze the adapter to complete quiesce processing
8985 * IPR_RC_JOB_CONTINUE
8987 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8989 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8992 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8993 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8995 return IPR_RC_JOB_CONTINUE;
8999 * ipr_reset_cancel_hcam_done - Check for outstanding commands
9000 * @ipr_cmd: ipr command struct
9002 * Description: Ensure nothing is outstanding to the IOA and
9003 * proceed with IOA disconnect. Otherwise reset the IOA.
9006 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
9008 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
9010 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9011 struct ipr_cmnd *loop_cmd;
9012 struct ipr_hrr_queue *hrrq;
9013 int rc = IPR_RC_JOB_CONTINUE;
9017 ipr_cmd->job_step = ipr_reset_quiesce_done;
9019 for_each_hrrq(hrrq, ioa_cfg) {
9020 spin_lock(&hrrq->_lock);
9021 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9023 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9024 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9025 rc = IPR_RC_JOB_RETURN;
9028 spin_unlock(&hrrq->_lock);
9039 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9040 * @ipr_cmd: ipr command struct
9042 * Description: Cancel any oustanding HCAMs to the IOA.
9045 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9047 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9049 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9050 int rc = IPR_RC_JOB_CONTINUE;
9051 struct ipr_cmd_pkt *cmd_pkt;
9052 struct ipr_cmnd *hcam_cmd;
9053 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9056 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9058 if (!hrrq->ioa_is_dead) {
9059 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9060 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9061 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9064 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9065 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9066 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9067 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9068 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9069 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9070 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9071 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9072 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9073 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9074 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9075 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9076 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9077 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9079 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9080 IPR_CANCEL_TIMEOUT);
9082 rc = IPR_RC_JOB_RETURN;
9083 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9088 ipr_cmd->job_step = ipr_reset_alert;
9095 * ipr_reset_ucode_download_done - Microcode download completion
9096 * @ipr_cmd: ipr command struct
9098 * Description: This function unmaps the microcode download buffer.
9101 * IPR_RC_JOB_CONTINUE
9103 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9105 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9106 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9108 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9109 sglist->num_sg, DMA_TO_DEVICE);
9111 ipr_cmd->job_step = ipr_reset_alert;
9112 return IPR_RC_JOB_CONTINUE;
9116 * ipr_reset_ucode_download - Download microcode to the adapter
9117 * @ipr_cmd: ipr command struct
9119 * Description: This function checks to see if it there is microcode
9120 * to download to the adapter. If there is, a download is performed.
9123 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9125 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9127 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9128 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9131 ipr_cmd->job_step = ipr_reset_alert;
9134 return IPR_RC_JOB_CONTINUE;
9136 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9137 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9138 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9139 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9140 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9141 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9142 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9145 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9147 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9148 ipr_cmd->job_step = ipr_reset_ucode_download_done;
9150 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9151 IPR_WRITE_BUFFER_TIMEOUT);
9154 return IPR_RC_JOB_RETURN;
9158 * ipr_reset_shutdown_ioa - Shutdown the adapter
9159 * @ipr_cmd: ipr command struct
9161 * Description: This function issues an adapter shutdown of the
9162 * specified type to the specified adapter as part of the
9163 * adapter reset job.
9166 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9168 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9170 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9171 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9172 unsigned long timeout;
9173 int rc = IPR_RC_JOB_CONTINUE;
9176 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9177 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9178 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9179 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9180 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9181 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9182 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9183 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9185 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9186 timeout = IPR_SHUTDOWN_TIMEOUT;
9187 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9188 timeout = IPR_INTERNAL_TIMEOUT;
9189 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9190 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9192 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9194 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9196 rc = IPR_RC_JOB_RETURN;
9197 ipr_cmd->job_step = ipr_reset_ucode_download;
9199 ipr_cmd->job_step = ipr_reset_alert;
9206 * ipr_reset_ioa_job - Adapter reset job
9207 * @ipr_cmd: ipr command struct
9209 * Description: This function is the job router for the adapter reset job.
9214 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9217 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9220 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9222 if (ioa_cfg->reset_cmd != ipr_cmd) {
9224 * We are doing nested adapter resets and this is
9225 * not the current reset job.
9227 list_add_tail(&ipr_cmd->queue,
9228 &ipr_cmd->hrrq->hrrq_free_q);
9232 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9233 rc = ipr_cmd->job_step_failed(ipr_cmd);
9234 if (rc == IPR_RC_JOB_RETURN)
9238 ipr_reinit_ipr_cmnd(ipr_cmd);
9239 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9240 rc = ipr_cmd->job_step(ipr_cmd);
9241 } while (rc == IPR_RC_JOB_CONTINUE);
9245 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9246 * @ioa_cfg: ioa config struct
9247 * @job_step: first job step of reset job
9248 * @shutdown_type: shutdown type
9250 * Description: This function will initiate the reset of the given adapter
9251 * starting at the selected job step.
9252 * If the caller needs to wait on the completion of the reset,
9253 * the caller must sleep on the reset_wait_q.
9258 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9259 int (*job_step) (struct ipr_cmnd *),
9260 enum ipr_shutdown_type shutdown_type)
9262 struct ipr_cmnd *ipr_cmd;
9265 ioa_cfg->in_reset_reload = 1;
9266 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9267 spin_lock(&ioa_cfg->hrrq[i]._lock);
9268 ioa_cfg->hrrq[i].allow_cmds = 0;
9269 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9272 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9273 ioa_cfg->scsi_unblock = 0;
9274 ioa_cfg->scsi_blocked = 1;
9275 scsi_block_requests(ioa_cfg->host);
9278 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9279 ioa_cfg->reset_cmd = ipr_cmd;
9280 ipr_cmd->job_step = job_step;
9281 ipr_cmd->u.shutdown_type = shutdown_type;
9283 ipr_reset_ioa_job(ipr_cmd);
9287 * ipr_initiate_ioa_reset - Initiate an adapter reset
9288 * @ioa_cfg: ioa config struct
9289 * @shutdown_type: shutdown type
9291 * Description: This function will initiate the reset of the given adapter.
9292 * If the caller needs to wait on the completion of the reset,
9293 * the caller must sleep on the reset_wait_q.
9298 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9299 enum ipr_shutdown_type shutdown_type)
9303 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9306 if (ioa_cfg->in_reset_reload) {
9307 if (ioa_cfg->sdt_state == GET_DUMP)
9308 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9309 else if (ioa_cfg->sdt_state == READ_DUMP)
9310 ioa_cfg->sdt_state = ABORT_DUMP;
9313 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9314 dev_err(&ioa_cfg->pdev->dev,
9315 "IOA taken offline - error recovery failed\n");
9317 ioa_cfg->reset_retries = 0;
9318 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9319 spin_lock(&ioa_cfg->hrrq[i]._lock);
9320 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9321 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9325 if (ioa_cfg->in_ioa_bringdown) {
9326 ioa_cfg->reset_cmd = NULL;
9327 ioa_cfg->in_reset_reload = 0;
9328 ipr_fail_all_ops(ioa_cfg);
9329 wake_up_all(&ioa_cfg->reset_wait_q);
9331 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9332 ioa_cfg->scsi_unblock = 1;
9333 schedule_work(&ioa_cfg->work_q);
9337 ioa_cfg->in_ioa_bringdown = 1;
9338 shutdown_type = IPR_SHUTDOWN_NONE;
9342 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9347 * ipr_reset_freeze - Hold off all I/O activity
9348 * @ipr_cmd: ipr command struct
9350 * Description: If the PCI slot is frozen, hold off all I/O
9351 * activity; then, as soon as the slot is available again,
9352 * initiate an adapter reset.
9354 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9356 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9359 /* Disallow new interrupts, avoid loop */
9360 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9361 spin_lock(&ioa_cfg->hrrq[i]._lock);
9362 ioa_cfg->hrrq[i].allow_interrupts = 0;
9363 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9366 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9367 ipr_cmd->done = ipr_reset_ioa_job;
9368 return IPR_RC_JOB_RETURN;
9372 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9373 * @pdev: PCI device struct
9375 * Description: This routine is called to tell us that the MMIO
9376 * access to the IOA has been restored
9378 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9380 unsigned long flags = 0;
9381 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9383 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9384 if (!ioa_cfg->probe_done)
9385 pci_save_state(pdev);
9386 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9387 return PCI_ERS_RESULT_NEED_RESET;
9391 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9392 * @pdev: PCI device struct
9394 * Description: This routine is called to tell us that the PCI bus
9395 * is down. Can't do anything here, except put the device driver
9396 * into a holding pattern, waiting for the PCI bus to come back.
9398 static void ipr_pci_frozen(struct pci_dev *pdev)
9400 unsigned long flags = 0;
9401 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9403 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9404 if (ioa_cfg->probe_done)
9405 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9406 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9410 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9411 * @pdev: PCI device struct
9413 * Description: This routine is called by the pci error recovery
9414 * code after the PCI slot has been reset, just before we
9415 * should resume normal operations.
9417 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9419 unsigned long flags = 0;
9420 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9422 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9423 if (ioa_cfg->probe_done) {
9424 if (ioa_cfg->needs_warm_reset)
9425 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9427 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9430 wake_up_all(&ioa_cfg->eeh_wait_q);
9431 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9432 return PCI_ERS_RESULT_RECOVERED;
9436 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9437 * @pdev: PCI device struct
9439 * Description: This routine is called when the PCI bus has
9440 * permanently failed.
9442 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9444 unsigned long flags = 0;
9445 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9448 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9449 if (ioa_cfg->probe_done) {
/* The hardware is gone: abort any in-progress dump ... */
9450 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9451 ioa_cfg->sdt_state = ABORT_DUMP;
/* ... allow only one final reset attempt, and bring the IOA down. */
9452 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9453 ioa_cfg->in_ioa_bringdown = 1;
/* Stop accepting new commands on every host RRQ. */
9454 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9455 spin_lock(&ioa_cfg->hrrq[i]._lock);
9456 ioa_cfg->hrrq[i].allow_cmds = 0;
9457 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9460 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/* Release any probe-time waiter so probe can fail cleanly. */
9462 wake_up_all(&ioa_cfg->eeh_wait_q);
9463 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9467 * ipr_pci_error_detected - Called when a PCI error is detected.
9468 * @pdev: PCI device struct
9469 * @state: PCI channel state
9471 * Description: Called when a PCI error is detected.
9473 * Return value:
9474 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9476 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9477 pci_channel_state_t state)
/* Dispatch on the channel state reported by the PCI error-recovery core. */
9480 case pci_channel_io_frozen:
9481 ipr_pci_frozen(pdev);
9482 return PCI_ERS_RESULT_CAN_RECOVER;
9483 case pci_channel_io_perm_failure:
9484 ipr_pci_perm_failure(pdev);
9485 return PCI_ERS_RESULT_DISCONNECT;
/* Any other state: ask the core to reset the slot. */
9490 return PCI_ERS_RESULT_NEED_RESET;
9494 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9495 * @ioa_cfg: ioa cfg struct
9497 * Description: This is the second phase of adapter initialization
9498 * This function takes care of initializing the adapter to the point
9499 * where it can accept new commands.
9502 * 0 on success / -EIO on failure
9504 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9507 unsigned long host_lock_flags = 0;
9510 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9511 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
/* From here on the EEH handlers are allowed to drive resets. */
9512 ioa_cfg->probe_done = 1;
/* Hard reset if phase 1 found the card in an unknown state; otherwise
 * just run the enable-IOA stage of the reset job. */
9513 if (ioa_cfg->needs_hard_reset) {
9514 ioa_cfg->needs_hard_reset = 0;
9515 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9517 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9519 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9526 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9527 * @ioa_cfg: ioa config struct
9532 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
/* Safe to call on a partially-allocated config (used on error paths):
 * every pointer is checked and NULLed so a second call is harmless. */
9536 if (ioa_cfg->ipr_cmnd_list) {
9537 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9538 if (ioa_cfg->ipr_cmnd_list[i])
9539 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9540 ioa_cfg->ipr_cmnd_list[i],
9541 ioa_cfg->ipr_cmnd_list_dma[i]);
9543 ioa_cfg->ipr_cmnd_list[i] = NULL;
/* NOTE(review): dma_pool_destroy() is a no-op on NULL, so this guard
 * is redundant - harmless, but could be dropped. */
9547 if (ioa_cfg->ipr_cmd_pool)
9548 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9550 kfree(ioa_cfg->ipr_cmnd_list);
9551 kfree(ioa_cfg->ipr_cmnd_list_dma);
9552 ioa_cfg->ipr_cmnd_list = NULL;
9553 ioa_cfg->ipr_cmnd_list_dma = NULL;
9554 ioa_cfg->ipr_cmd_pool = NULL;
9558 * ipr_free_mem - Frees memory allocated for an adapter
9559 * @ioa_cfg: ioa cfg struct
9564 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
/* Teardown mirrors ipr_alloc_mem() in reverse order. */
9568 kfree(ioa_cfg->res_entries);
9569 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9570 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9571 ipr_free_cmd_blks(ioa_cfg);
/* One host RRQ DMA ring per interrupt vector. */
9573 for (i = 0; i < ioa_cfg->hrrq_num; i++)
9574 dma_free_coherent(&ioa_cfg->pdev->dev,
9575 sizeof(u32) * ioa_cfg->hrrq[i].size,
9576 ioa_cfg->hrrq[i].host_rrq,
9577 ioa_cfg->hrrq[i].host_rrq_dma);
9579 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9580 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9582 for (i = 0; i < IPR_MAX_HCAMS; i++) {
9583 dma_free_coherent(&ioa_cfg->pdev->dev,
9584 sizeof(struct ipr_hostrcb),
9585 ioa_cfg->hostrcb[i],
9586 ioa_cfg->hostrcb_dma[i]);
9589 ipr_free_dump(ioa_cfg);
9590 kfree(ioa_cfg->trace);
9594 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9595 * @ioa_cfg: ipr cfg struct
9597 * This function frees all allocated IRQs for the
9598 * specified adapter.
9603 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9605 struct pci_dev *pdev = ioa_cfg->pdev;
/* Each vector was requested with its hrrq as the dev_id cookie. */
9608 for (i = 0; i < ioa_cfg->nvectors; i++)
9609 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9610 pci_free_irq_vectors(pdev);
9614 * ipr_free_all_resources - Free all allocated resources for an adapter.
9615 * @ioa_cfg: ioa config struct
9617 * This function frees all allocated resources for the
9618 * specified adapter.
9623 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9625 struct pci_dev *pdev = ioa_cfg->pdev;
/* Reverse of the acquisition order in ipr_probe_ioa(). */
9628 ipr_free_irqs(ioa_cfg);
9629 if (ioa_cfg->reset_work_q)
9630 destroy_workqueue(ioa_cfg->reset_work_q);
9631 iounmap(ioa_cfg->hdw_dma_regs);
9632 pci_release_regions(pdev);
9633 ipr_free_mem(ioa_cfg);
/* Drops the Scsi_Host reference; ioa_cfg lives in its hostdata, so it
 * must not be touched after this call. */
9634 scsi_host_put(ioa_cfg->host);
9635 pci_disable_device(pdev);
9640 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9641 * @ioa_cfg: ioa config struct
9644 * 0 on success / -ENOMEM on allocation failure
9646 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9648 struct ipr_cmnd *ipr_cmd;
9649 struct ipr_ioarcb *ioarcb;
9650 dma_addr_t dma_addr;
9651 int i, entries_each_hrrq, hrrq_id = 0;
/* 512-byte alignment for the command blocks - presumably a hardware
 * requirement of the IOARCB; TODO confirm against the adapter spec. */
9653 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9654 sizeof(struct ipr_cmnd), 512, 0);
9656 if (!ioa_cfg->ipr_cmd_pool)
9659 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9660 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9662 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9663 ipr_free_cmd_blks(ioa_cfg);
/* Partition the command-id space across the host RRQs.  With multiple
 * queues, queue 0 is reserved for internal commands and the remaining
 * base command blocks are split evenly over queues 1..n-1. */
9667 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9668 if (ioa_cfg->hrrq_num > 1) {
9670 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9671 ioa_cfg->hrrq[i].min_cmd_id = 0;
9672 ioa_cfg->hrrq[i].max_cmd_id =
9673 (entries_each_hrrq - 1);
9676 IPR_NUM_BASE_CMD_BLKS/
9677 (ioa_cfg->hrrq_num - 1);
9678 ioa_cfg->hrrq[i].min_cmd_id =
9679 IPR_NUM_INTERNAL_CMD_BLKS +
9680 (i - 1) * entries_each_hrrq;
9681 ioa_cfg->hrrq[i].max_cmd_id =
9682 (IPR_NUM_INTERNAL_CMD_BLKS +
9683 i * entries_each_hrrq - 1);
9686 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9687 ioa_cfg->hrrq[i].min_cmd_id = 0;
9688 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9690 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9693 BUG_ON(ioa_cfg->hrrq_num == 0);
/* Give any command blocks left over by the integer division above to
 * the last queue so all IPR_NUM_CMD_BLKS ids are covered. */
9695 i = IPR_NUM_CMD_BLKS -
9696 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9698 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9699 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
/* Allocate and pre-initialize every command block. */
9702 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9703 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9706 ipr_free_cmd_blks(ioa_cfg);
9710 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9711 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9712 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9714 ioarcb = &ipr_cmd->ioarcb;
9715 ipr_cmd->dma_addr = dma_addr;
/* SIS64 chips take a 64-bit IOARCB bus address; SIS32 a 32-bit one. */
9717 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9719 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9721 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9722 if (ioa_cfg->sis64) {
9723 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9724 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9725 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9726 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9728 ioarcb->write_ioadl_addr =
9729 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9730 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9731 ioarcb->ioasa_host_pci_addr =
9732 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9734 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9735 ipr_cmd->cmd_index = i;
9736 ipr_cmd->ioa_cfg = ioa_cfg;
9737 ipr_cmd->sense_buffer_dma = dma_addr +
9738 offsetof(struct ipr_cmnd, sense_buffer);
/* Hand the block to the RRQ that owns its command-id range. */
9740 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9741 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9742 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9743 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9751 * ipr_alloc_mem - Allocate memory for an adapter
9752 * @ioa_cfg: ioa config struct
9755 * 0 on success / non-zero for error
9757 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9759 struct pci_dev *pdev = ioa_cfg->pdev;
9760 int i, rc = -ENOMEM;
/* Resource table entries, one per supported device. */
9763 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9764 ioa_cfg->max_devs_supported, GFP_KERNEL);
9766 if (!ioa_cfg->res_entries)
9769 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9770 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9771 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9774 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9775 sizeof(struct ipr_misc_cbs),
9776 &ioa_cfg->vpd_cbs_dma,
9779 if (!ioa_cfg->vpd_cbs)
9780 goto out_free_res_entries;
9782 if (ipr_alloc_cmd_blks(ioa_cfg))
9783 goto out_free_vpd_cbs;
/* One DMA-coherent host RRQ ring per interrupt vector. */
9785 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9786 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9787 sizeof(u32) * ioa_cfg->hrrq[i].size,
9788 &ioa_cfg->hrrq[i].host_rrq_dma,
9791 if (!ioa_cfg->hrrq[i].host_rrq) {
/* Free the rings allocated so far, then unwind. */
9793 dma_free_coherent(&pdev->dev,
9794 sizeof(u32) * ioa_cfg->hrrq[i].size,
9795 ioa_cfg->hrrq[i].host_rrq,
9796 ioa_cfg->hrrq[i].host_rrq_dma);
9797 goto out_ipr_free_cmd_blocks;
9799 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9802 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9803 ioa_cfg->cfg_table_size,
9804 &ioa_cfg->cfg_table_dma,
9807 if (!ioa_cfg->u.cfg_table)
9808 goto out_free_host_rrq;
/* Host-controlled async message buffers (error log / config change). */
9810 for (i = 0; i < IPR_MAX_HCAMS; i++) {
9811 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9812 sizeof(struct ipr_hostrcb),
9813 &ioa_cfg->hostrcb_dma[i],
9816 if (!ioa_cfg->hostrcb[i])
9817 goto out_free_hostrcb_dma;
9819 ioa_cfg->hostrcb[i]->hostrcb_dma =
9820 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9821 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9822 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9825 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9826 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9828 if (!ioa_cfg->trace)
9829 goto out_free_hostrcb_dma;
/* Error unwind: goto-cleanup in reverse allocation order. */
9836 out_free_hostrcb_dma:
9838 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9839 ioa_cfg->hostrcb[i],
9840 ioa_cfg->hostrcb_dma[i]);
9842 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9843 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9845 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9846 dma_free_coherent(&pdev->dev,
9847 sizeof(u32) * ioa_cfg->hrrq[i].size,
9848 ioa_cfg->hrrq[i].host_rrq,
9849 ioa_cfg->hrrq[i].host_rrq_dma);
9851 out_ipr_free_cmd_blocks:
9852 ipr_free_cmd_blks(ioa_cfg);
9854 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9855 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9856 out_free_res_entries:
9857 kfree(ioa_cfg->res_entries);
9862 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9863 * @ioa_cfg: ioa config struct
9868 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9872 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9873 ioa_cfg->bus_attr[i].bus = i;
9874 ioa_cfg->bus_attr[i].qas_enabled = 0;
9875 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
/* Honor the ipr_max_speed module parameter when it is a valid index;
 * otherwise fall back to the U160 rate. */
9876 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9877 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9879 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9884 * ipr_init_regs - Initialize IOA registers
9885 * @ioa_cfg: ioa config struct
9890 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9892 const struct ipr_interrupt_offsets *p;
9893 struct ipr_interrupts *t;
/* Translate per-chip register offsets into absolute MMIO addresses by
 * adding the mapped BAR base. */
9896 p = &ioa_cfg->chip_cfg->regs;
9898 base = ioa_cfg->hdw_dma_regs;
9900 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9901 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9902 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9903 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9904 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9905 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9906 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9907 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9908 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9909 t->ioarrin_reg = base + p->ioarrin_reg;
9910 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9911 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9912 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9913 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9914 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9915 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
/* Extra registers that only exist on SIS64 chips. */
9917 if (ioa_cfg->sis64) {
9918 t->init_feedback_reg = base + p->init_feedback_reg;
9919 t->dump_addr_reg = base + p->dump_addr_reg;
9920 t->dump_data_reg = base + p->dump_data_reg;
9921 t->endian_swap_reg = base + p->endian_swap_reg;
9926 * ipr_init_ioa_cfg - Initialize IOA config struct
9927 * @ioa_cfg: ioa config struct
9928 * @host: scsi host struct
9929 * @pdev: PCI dev struct
9934 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9935 struct Scsi_Host *host, struct pci_dev *pdev)
9939 ioa_cfg->host = host;
9940 ioa_cfg->pdev = pdev;
9941 ioa_cfg->log_level = ipr_log_level;
9942 ioa_cfg->doorbell = IPR_DOORBELL;
/* Eyecatcher strings used to locate structures in adapter dumps. */
9943 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9944 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9945 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9946 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9947 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9948 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9950 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9951 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9952 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9953 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9954 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9955 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9956 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9957 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9958 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9959 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9960 ioa_cfg->sdt_state = INACTIVE;
9962 ipr_initialize_bus_attr(ioa_cfg);
9963 ioa_cfg->max_devs_supported = ipr_max_devs;
/* Topology limits and config-table size differ between SIS64 and
 * SIS32 adapters; clamp the module parameter to each family's max. */
9965 if (ioa_cfg->sis64) {
9966 host->max_channel = IPR_MAX_SIS64_BUSES;
9967 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9968 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9969 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9970 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9971 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9972 + ((sizeof(struct ipr_config_table_entry64)
9973 * ioa_cfg->max_devs_supported)));
9975 host->max_channel = IPR_VSET_BUS;
9976 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9977 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9978 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9979 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9980 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9981 + ((sizeof(struct ipr_config_table_entry)
9982 * ioa_cfg->max_devs_supported)));
9985 host->unique_id = host->host_no;
9986 host->max_cmd_len = IPR_MAX_CDB_LEN;
9987 host->can_queue = ioa_cfg->max_cmds;
9988 pci_set_drvdata(pdev, ioa_cfg);
/* Initialize every possible host RRQ up front; hrrq_num is decided
 * later once the vector count is known. */
9990 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9991 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9992 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9993 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9995 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9997 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
10002 * ipr_get_chip_info - Find adapter chip information
10003 * @dev_id: PCI device id struct
10006 * ptr to chip information on success / NULL on failure
10008 static const struct ipr_chip_t *
10009 ipr_get_chip_info(const struct pci_device_id *dev_id)
/* Linear search of the static chip table by vendor/device id. */
10013 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
10014 if (ipr_chip[i].vendor == dev_id->vendor &&
10015 ipr_chip[i].device == dev_id->device)
10016 return &ipr_chip[i];
10021 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10022 * during probe time
10023 * @ioa_cfg: ioa config struct
10028 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10030 struct pci_dev *pdev = ioa_cfg->pdev;
/* If EEH has the channel offline, block (bounded by the recovery
 * timeout) until the error handlers wake eeh_wait_q, then restore the
 * config space that recovery may have clobbered. */
10032 if (pci_channel_offline(pdev)) {
10033 wait_event_timeout(ioa_cfg->eeh_wait_q,
10034 !pci_channel_offline(pdev),
10035 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10036 pci_restore_state(pdev);
/* name_msi_vectors - Build "host<no>-<vec>" description strings used when
 * requesting each MSI/MSI-X vector's IRQ. */
10040 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10042 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10044 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10045 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10046 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
/* NOTE(review): snprintf already NUL-terminates; this explicit
 * termination is belt-and-braces. */
10047 ioa_cfg->vectors_info[vec_idx].
10048 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
/* ipr_request_other_msi_irqs - Request IRQs for vectors 1..nvectors-1
 * (vector 0 is requested separately by the caller).  On failure, the
 * vectors acquired so far are freed before returning the error. */
10052 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10053 struct pci_dev *pdev)
10057 for (i = 1; i < ioa_cfg->nvectors; i++) {
10058 rc = request_irq(pci_irq_vector(pdev, i),
10061 ioa_cfg->vectors_info[i].desc,
10062 &ioa_cfg->hrrq[i]);
/* Unwind: free the vectors already requested. */
10065 free_irq(pci_irq_vector(pdev, i),
10066 &ioa_cfg->hrrq[i]);
10074 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10075 * @pdev: PCI device struct
10077 * Description: Simply set the msi_received flag to 1 indicating that
10078 * Message Signaled Interrupts are supported.
10080 * Return value:
10081 * irqreturn_t (IRQ_HANDLED)
10083 static irqreturn_t ipr_test_intr(int irq, void *devp)
10085 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10086 unsigned long lock_flags = 0;
10087 irqreturn_t rc = IRQ_HANDLED;
10089 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
10090 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Record success and wake ipr_test_msi(), which sleeps on msi_wait_q. */
10092 ioa_cfg->msi_received = 1;
10093 wake_up(&ioa_cfg->msi_wait_q);
10095 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10100 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10101 * @pdev: PCI device struct
10103 * Description: This routine sets up and initiates a test interrupt to determine
10104 * if the interrupt is received via the ipr_test_intr() service routine.
10105 * If the tests fails, the driver will fall back to LSI.
10108 * 0 on success / non-zero on failure
10110 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10113 volatile u32 int_reg;
10114 unsigned long lock_flags = 0;
10115 int irq = pci_irq_vector(pdev, 0);
10119 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10120 init_waitqueue_head(&ioa_cfg->msi_wait_q);
10121 ioa_cfg->msi_received = 0;
/* Mask everything except the transition-to-operational interrupt, then
 * unmask only the IO debug acknowledge bit used for the test. */
10122 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10123 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
/* Read back to flush the posted write before enabling the IRQ. */
10124 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10125 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10127 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10129 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
10131 } else if (ipr_debug)
10132 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
/* Fire the test interrupt and wait up to 1s for ipr_test_intr() to
 * set msi_received. */
10134 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10135 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10136 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10137 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10138 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10140 if (!ioa_cfg->msi_received) {
10141 /* MSI test failed */
10142 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
10144 } else if (ipr_debug)
10145 dev_info(&pdev->dev, "MSI test succeeded.\n");
10147 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10149 free_irq(irq, ioa_cfg);
10156 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
10157 * @pdev: PCI device struct
10158 * @dev_id: PCI device id struct
10161 * 0 on success / non-zero on failure
10163 static int ipr_probe_ioa(struct pci_dev *pdev,
10164 const struct pci_device_id *dev_id)
10166 struct ipr_ioa_cfg *ioa_cfg;
10167 struct Scsi_Host *host;
10168 unsigned long ipr_regs_pci;
10169 void __iomem *ipr_regs;
10170 int rc = PCIBIOS_SUCCESSFUL;
10171 volatile u32 mask, uproc, interrupts;
10172 unsigned long lock_flags, driver_lock_flags;
10173 unsigned int irq_flag;
10177 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* ioa_cfg lives in the Scsi_Host's hostdata allocation. */
10178 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10181 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10186 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10187 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10188 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10190 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10192 if (!ioa_cfg->ipr_chip) {
10193 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10194 dev_id->vendor, dev_id->device);
10195 goto out_scsi_host_put;
10198 /* set SIS 32 or SIS 64 */
10199 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10200 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10201 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10202 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
/* Transition-to-operational timeout: module parameter wins, then the
 * per-device long-timeout flag, then the default. */
10204 if (ipr_transop_timeout)
10205 ioa_cfg->transop_timeout = ipr_transop_timeout;
10206 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10207 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10209 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10211 ioa_cfg->revid = pdev->revision;
10213 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10215 ipr_regs_pci = pci_resource_start(pdev, 0);
10217 rc = pci_request_regions(pdev, IPR_NAME);
10219 dev_err(&pdev->dev,
10220 "Couldn't register memory range of registers\n");
10221 goto out_scsi_host_put;
10224 rc = pci_enable_device(pdev);
/* If the channel is already in EEH recovery, wait it out and retry
 * the enable once. */
10226 if (rc || pci_channel_offline(pdev)) {
10227 if (pci_channel_offline(pdev)) {
10228 ipr_wait_for_pci_err_recovery(ioa_cfg);
10229 rc = pci_enable_device(pdev);
10233 dev_err(&pdev->dev, "Cannot enable adapter\n");
10234 ipr_wait_for_pci_err_recovery(ioa_cfg);
10235 goto out_release_regions;
10239 ipr_regs = pci_ioremap_bar(pdev, 0);
10242 dev_err(&pdev->dev,
10243 "Couldn't map memory range of registers\n");
10248 ioa_cfg->hdw_dma_regs = ipr_regs;
10249 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10250 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10252 ipr_init_regs(ioa_cfg);
/* SIS64 prefers a 64-bit DMA mask but can fall back to 32-bit. */
10254 if (ioa_cfg->sis64) {
10255 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10257 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10258 rc = dma_set_mask_and_coherent(&pdev->dev,
10262 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10265 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10266 goto cleanup_nomem;
10269 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10270 ioa_cfg->chip_cfg->cache_line_size);
10272 if (rc != PCIBIOS_SUCCESSFUL) {
10273 dev_err(&pdev->dev, "Write of cache line size failed\n");
10274 ipr_wait_for_pci_err_recovery(ioa_cfg);
10276 goto cleanup_nomem;
10279 /* Issue MMIO read to ensure card is not in EEH */
10280 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10281 ipr_wait_for_pci_err_recovery(ioa_cfg);
10283 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10284 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10285 IPR_MAX_MSIX_VECTORS);
10286 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
/* Prefer MSI-X/MSI when the chip supports it; legacy INTx otherwise. */
10289 irq_flag = PCI_IRQ_LEGACY;
10290 if (ioa_cfg->ipr_chip->has_msi)
10291 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10292 rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10294 ipr_wait_for_pci_err_recovery(ioa_cfg);
10295 goto cleanup_nomem;
10297 ioa_cfg->nvectors = rc;
10299 if (!pdev->msi_enabled && !pdev->msix_enabled)
10300 ioa_cfg->clear_isr = 1;
10302 pci_set_master(pdev);
/* An EEH event may have hit during setup; redo bus mastering once. */
10304 if (pci_channel_offline(pdev)) {
10305 ipr_wait_for_pci_err_recovery(ioa_cfg);
10306 pci_set_master(pdev);
10307 if (pci_channel_offline(pdev)) {
10309 goto out_msi_disable;
/* Verify MSI delivery actually works; fall back to a single legacy
 * vector if the test interrupt never arrives. */
10313 if (pdev->msi_enabled || pdev->msix_enabled) {
10314 rc = ipr_test_msi(ioa_cfg, pdev);
10317 dev_info(&pdev->dev,
10318 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10319 pdev->msix_enabled ? "-X" : "");
10322 ipr_wait_for_pci_err_recovery(ioa_cfg);
10323 pci_free_irq_vectors(pdev);
10325 ioa_cfg->nvectors = 1;
10326 ioa_cfg->clear_isr = 1;
10329 goto out_msi_disable;
10333 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10334 (unsigned int)num_online_cpus(),
10335 (unsigned int)IPR_MAX_HRRQ_NUM);
10337 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10338 goto out_msi_disable;
10340 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10341 goto out_msi_disable;
10343 rc = ipr_alloc_mem(ioa_cfg);
10345 dev_err(&pdev->dev,
10346 "Couldn't allocate enough memory for device driver!\n");
10347 goto out_msi_disable;
10350 /* Save away PCI config space for use following IOA reset */
10351 rc = pci_save_state(pdev);
10353 if (rc != PCIBIOS_SUCCESSFUL) {
10354 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10356 goto cleanup_nolog;
10360 * If HRRQ updated interrupt is not masked, or reset alert is set,
10361 * the card is in an unknown state and needs a hard reset
10363 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10364 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10365 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10366 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10367 ioa_cfg->needs_hard_reset = 1;
10368 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10369 ioa_cfg->needs_hard_reset = 1;
10370 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10371 ioa_cfg->ioa_unit_checked = 1;
10373 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10374 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Request one IRQ per vector (MSI/MSI-X) or the single legacy IRQ. */
10377 if (pdev->msi_enabled || pdev->msix_enabled) {
10378 name_msi_vectors(ioa_cfg);
10379 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10380 ioa_cfg->vectors_info[0].desc,
10381 &ioa_cfg->hrrq[0]);
10383 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10385 rc = request_irq(pdev->irq, ipr_isr,
10387 IPR_NAME, &ioa_cfg->hrrq[0]);
10390 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10392 goto cleanup_nolog;
/* Early Obsidian-E (revid 0) and flagged devices need a PCI warm
 * reset instead of BIST, which requires a dedicated workqueue. */
10395 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10396 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10397 ioa_cfg->needs_warm_reset = 1;
10398 ioa_cfg->reset = ipr_reset_slot_reset;
10400 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10401 WQ_MEM_RECLAIM, host->host_no);
10403 if (!ioa_cfg->reset_work_q) {
10404 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10409 ioa_cfg->reset = ipr_reset_start_bist;
10411 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10412 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10413 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
/* Error unwind: release resources in reverse acquisition order. */
10420 ipr_free_irqs(ioa_cfg);
10422 ipr_free_mem(ioa_cfg);
10424 ipr_wait_for_pci_err_recovery(ioa_cfg);
10425 pci_free_irq_vectors(pdev);
10429 pci_disable_device(pdev);
10430 out_release_regions:
10431 pci_release_regions(pdev);
10433 scsi_host_put(host);
10438 * ipr_initiate_ioa_bringdown - Bring down an adapter
10439 * @ioa_cfg: ioa config struct
10440 * @shutdown_type: shutdown type
10442 * Description: This function will initiate bringing down the adapter.
10443 * This consists of issuing an IOA shutdown to the adapter
10444 * to flush the cache, and running BIST.
10445 * If the caller needs to wait on the completion of the reset,
10446 * the caller must sleep on the reset_wait_q.
10451 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10452 enum ipr_shutdown_type shutdown_type)
/* A dump is pointless on a deliberate bringdown - abort it. */
10455 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10456 ioa_cfg->sdt_state = ABORT_DUMP;
/* No retries on bringdown: one shot, then done. */
10457 ioa_cfg->reset_retries = 0;
10458 ioa_cfg->in_ioa_bringdown = 1;
10459 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10464 * __ipr_remove - Remove a single adapter
10465 * @pdev: pci device struct
10467 * Adapter hot plug remove entry point.
10472 static void __ipr_remove(struct pci_dev *pdev)
10474 unsigned long host_lock_flags = 0;
10475 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10477 unsigned long driver_lock_flags;
10480 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Drain any reset already in flight before starting the bringdown;
 * the lock is dropped while sleeping on reset_wait_q. */
10481 while (ioa_cfg->in_reset_reload) {
10482 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10483 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10484 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Mark every queue as removing so no new work is accepted. */
10487 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10488 spin_lock(&ioa_cfg->hrrq[i]._lock);
10489 ioa_cfg->hrrq[i].removing_ioa = 1;
10490 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10493 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10495 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10496 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10497 flush_work(&ioa_cfg->work_q);
10498 if (ioa_cfg->reset_work_q)
10499 flush_workqueue(ioa_cfg->reset_work_q);
10500 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10501 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Unlink this adapter from the global driver list. */
10503 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10504 list_del(&ioa_cfg->queue);
10505 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10507 if (ioa_cfg->sdt_state == ABORT_DUMP)
10508 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10509 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10511 ipr_free_all_resources(ioa_cfg);
10517 * ipr_remove - IOA hot plug remove entry point
10518 * @pdev: pci device struct
10520 * Adapter hot plug remove entry point.
10525 static void ipr_remove(struct pci_dev *pdev)
10527 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Tear down the sysfs/bin files created by ipr_probe() before
 * removing the host and freeing everything in __ipr_remove(). */
10531 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10533 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10535 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10536 &ipr_ioa_async_err_log);
10537 scsi_remove_host(ioa_cfg->host);
10539 __ipr_remove(pdev);
10545 * ipr_probe - Adapter hot plug add entry point
10546 * @pdev: pci device struct
10547 * @dev_id: pci device id entry
10548 * 0 on success / non-zero on failure
10550 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10552 struct ipr_ioa_cfg *ioa_cfg;
10553 unsigned long flags;
/* Phase 1: allocate and bring up the adapter hardware. */
10556 rc = ipr_probe_ioa(pdev, dev_id);
10561 ioa_cfg = pci_get_drvdata(pdev);
/* Phase 2: reset/enable the IOA so it can accept commands. */
10562 rc = ipr_probe_ioa_part2(ioa_cfg);
10565 __ipr_remove(pdev);
10569 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10572 __ipr_remove(pdev);
/* Create sysfs trace/dump/async-error-log files; each failure path
 * unwinds everything created before it. */
10576 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10580 scsi_remove_host(ioa_cfg->host);
10581 __ipr_remove(pdev);
10585 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10586 &ipr_ioa_async_err_log);
10589 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10591 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10593 scsi_remove_host(ioa_cfg->host);
10594 __ipr_remove(pdev);
10598 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10602 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10603 &ipr_ioa_async_err_log);
10604 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10606 scsi_remove_host(ioa_cfg->host);
10607 __ipr_remove(pdev);
10610 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10611 ioa_cfg->scan_enabled = 1;
10612 schedule_work(&ioa_cfg->work_q);
10613 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10615 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
/* Enable irq_poll on the secondary queues for multi-vector SIS64. */
10617 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10618 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10619 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10620 ioa_cfg->iopoll_weight, ipr_iopoll);
10624 scsi_scan_host(ioa_cfg->host);
10630 * ipr_shutdown - Shutdown handler.
10631 * @pdev: pci device struct
10633 * This function is invoked upon system shutdown/reboot. It will issue
10634 * an adapter shutdown to the adapter to flush the write cache.
10639 static void ipr_shutdown(struct pci_dev *pdev)
10641 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10642 unsigned long lock_flags = 0;
10643 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10646 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Stop irq_poll processing on the secondary queues first. */
10647 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10648 ioa_cfg->iopoll_weight = 0;
10649 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10650 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
/* Let any in-flight reset finish before initiating the bringdown. */
10653 while (ioa_cfg->in_reset_reload) {
10654 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10655 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10656 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Fast reboot on SIS64: quiesce only, skipping the full shutdown. */
10659 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10660 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10662 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10663 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10664 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10665 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10666 ipr_free_irqs(ioa_cfg);
10667 pci_disable_device(ioa_cfg->pdev);
/*
 * PCI device ID table: every (vendor, device, subvendor, subdevice)
 * combination this driver binds to.  The last two initializer fields
 * are class/class_mask (0 = don't care) and driver_data, which carries
 * per-board flags such as IPR_USE_LONG_TRANSOP_TIMEOUT.
 *
 * NOTE(review): fragmentary extract -- the terminating all-zero
 * sentinel entry and closing brace are missing from this view; each
 * surviving line carries a baked-in original line number.  Code left
 * byte-identical; comments only.
 */
10671 static struct pci_device_id ipr_pci_table[] = {
/* Gemstone boards (Mylex device ID, IBM subsystem). */
10672 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10673 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10674 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10675 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10676 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10677 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10678 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10679 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
/* Citrine boards. */
10680 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10681 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10682 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10683 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10684 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10685 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10686 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10687 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10688 IPR_USE_LONG_TRANSOP_TIMEOUT },
/* Obsidian boards (both Adaptec and IBM device IDs). */
10689 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10690 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10691 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10692 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10693 IPR_USE_LONG_TRANSOP_TIMEOUT },
10694 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10695 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10696 IPR_USE_LONG_TRANSOP_TIMEOUT },
10697 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10698 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10699 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10700 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10701 IPR_USE_LONG_TRANSOP_TIMEOUT},
10702 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10703 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10704 IPR_USE_LONG_TRANSOP_TIMEOUT },
10705 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10706 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10707 IPR_USE_LONG_TRANSOP_TIMEOUT },
10708 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10709 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10710 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10711 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10712 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10713 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10714 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
/* Snipe / Scamp boards. */
10715 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10716 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10717 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10718 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10719 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10720 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10721 IPR_USE_LONG_TRANSOP_TIMEOUT },
10722 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10723 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10724 IPR_USE_LONG_TRANSOP_TIMEOUT },
/* CRoC FPGA boards (SIS-64 generation). */
10725 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10726 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10727 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10728 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10729 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10730 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10731 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10732 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10733 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10734 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10735 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10736 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
/* Crocodile boards. */
10737 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10738 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10739 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10740 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10741 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10742 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10743 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10744 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10745 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10746 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10747 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10748 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10749 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10750 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10751 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10752 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10753 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10754 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10755 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10756 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10757 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10758 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10759 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10760 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10761 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10762 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10763 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10764 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10765 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10766 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10767 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10768 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10769 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10770 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10771 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10772 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10773 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10774 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10775 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10776 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
/* Rattlesnake boards. */
10777 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10778 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10779 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10780 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
/* Exports the table for module autoloading (modalias matching). */
10783 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/*
 * PCI error-recovery (AER/EEH) callbacks: detection, MMIO re-enable
 * probing, and slot reset.  NOTE(review): the closing "};" of this
 * initializer is missing from this fragmentary extract.
 */
10785 static const struct pci_error_handlers ipr_err_handler = {
10786 .error_detected = ipr_pci_error_detected,
10787 .mmio_enabled = ipr_pci_mmio_enabled,
10788 .slot_reset = ipr_pci_slot_reset,
/*
 * The PCI driver descriptor tying together the ID table, probe/remove,
 * shutdown and error-recovery entry points.  NOTE(review): the .name
 * field line and the closing "};" are missing from this fragmentary
 * extract.
 */
10791 static struct pci_driver ipr_driver = {
10793 .id_table = ipr_pci_table,
10794 .probe = ipr_probe,
10795 .remove = ipr_remove,
10796 .shutdown = ipr_shutdown,
10797 .err_handler = &ipr_err_handler,
10801 * ipr_halt_done - Shutdown prepare completion
/* Completion callback for the shutdown-prepare command issued by
 * ipr_halt(): simply returns the command block to its HRRQ free list.
 * NOTE(review): braces missing in this fragmentary extract. */
10806 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10808 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10812 * ipr_halt - Issue shutdown prepare to all adapters
10815 * NOTIFY_OK on success / NOTIFY_DONE on failure
/*
 * Reboot-notifier callback: on restart/halt/poweroff, walk every
 * registered adapter and send an IOA SHUTDOWN "prepare for normal"
 * command so write caches are flushed.
 * NOTE(review): fragmentary extract -- braces and some lines missing;
 * code left byte-identical, comments only.
 */
10817 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10819 struct ipr_cmnd *ipr_cmd;
10820 struct ipr_ioa_cfg *ioa_cfg;
10821 unsigned long flags = 0, driver_lock_flags;
/* Only act on the three shutdown-class events. */
10823 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10824 return NOTIFY_DONE;
/* driver lock protects the global adapter list; each adapter's host
 * lock protects its command state. */
10826 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10828 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10829 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* Skip adapters that cannot accept commands, and skip SIS-64
 * adapters on a fast reboot (handled by ipr_shutdown instead). */
10830 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10831 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10832 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
/* Build and fire the shutdown-prepare IOA command; ipr_halt_done
 * recycles the command block on completion. */
10836 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10837 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10838 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10839 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10840 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10842 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10843 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10845 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10850 static struct notifier_block ipr_notifier = {
10855 * ipr_init - Module entry point
10858 * 0 on success / negative value on failure
/* Registers the reboot notifier before the PCI driver, and unwinds
 * the notifier if PCI registration fails.  NOTE(review): fragmentary
 * extract -- braces and the final return are missing. */
10860 static int __init ipr_init(void)
10864 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10865 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10867 register_reboot_notifier(&ipr_notifier);
10868 rc = pci_register_driver(&ipr_driver);
/* On pci_register_driver() failure, drop the notifier again. */
10870 unregister_reboot_notifier(&ipr_notifier);
10878 * ipr_exit - Module unload
10880 * Module unload entry point.
/* Tears down in reverse registration order: notifier first, then the
 * PCI driver.  NOTE(review): braces missing in this fragmentary
 * extract. */
10885 static void __exit ipr_exit(void)
10887 unregister_reboot_notifier(&ipr_notifier);
10888 pci_unregister_driver(&ipr_driver);
/* Standard module load/unload hookup. */
10891 module_init(ipr_init);
10892 module_exit(ipr_exit);