/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
 * - SCSI device hot plug
 */
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
/* List of all adapter instances managed by this driver */
static LIST_HEAD(ipr_ioa_head);
/* Backing storage for the module parameters registered below */
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;		/* index into ipr_max_bus_speeds: 1 = U160 */
static int ipr_testmode = 0;			/* allow unsupported configurations when set */
static unsigned int ipr_fastfail = 0;		/* reduce timeouts and retries when set */
static unsigned int ipr_transop_timeout = 0;	/* seconds to wait for IOA to come operational */
static unsigned int ipr_debug = 0;		/* extra debug logging when set */
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;	/* max physical devices */
static unsigned int ipr_dual_ioa_raid = 1;	/* dual-adapter RAID support enabled by default */
static unsigned int ipr_number_of_msix = 16;	/* MSI-X vectors to request on capable adapters */
static unsigned int ipr_fast_reboot;		/* skip adapter shutdown during reboot when set */
/* Serializes driver-global state such as ipr_ioa_head */
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
/*
 * NOTE(review): this excerpt appears to be missing several initializer
 * lines for each entry (entry separators, the nested register-offset
 * struct braces, and leading members such as the mailbox offset and
 * max command count) — verify against the complete source tree.
 */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
	.cache_line_size = 0x20,
	/* Register offsets for the 32 bit SIS register layout */
	.set_interrupt_mask_reg = 0x0022C,
	.clr_interrupt_mask_reg = 0x00230,
	.clr_interrupt_mask_reg32 = 0x00230,
	.sense_interrupt_mask_reg = 0x0022C,
	.sense_interrupt_mask_reg32 = 0x0022C,
	.clr_interrupt_reg = 0x00228,
	.clr_interrupt_reg32 = 0x00228,
	.sense_interrupt_reg = 0x00224,
	.sense_interrupt_reg32 = 0x00224,
	.ioarrin_reg = 0x00404,
	.sense_uproc_interrupt_reg = 0x00214,
	.sense_uproc_interrupt_reg32 = 0x00214,
	.set_uproc_interrupt_reg = 0x00214,
	.set_uproc_interrupt_reg32 = 0x00214,
	.clr_uproc_interrupt_reg = 0x00218,
	.clr_uproc_interrupt_reg32 = 0x00218
	{ /* Snipe and Scamp */
	.cache_line_size = 0x20,
	.set_interrupt_mask_reg = 0x00288,
	.clr_interrupt_mask_reg = 0x0028C,
	.clr_interrupt_mask_reg32 = 0x0028C,
	.sense_interrupt_mask_reg = 0x00288,
	.sense_interrupt_mask_reg32 = 0x00288,
	.clr_interrupt_reg = 0x00284,
	.clr_interrupt_reg32 = 0x00284,
	.sense_interrupt_reg = 0x00280,
	.sense_interrupt_reg32 = 0x00280,
	.ioarrin_reg = 0x00504,
	.sense_uproc_interrupt_reg = 0x00290,
	.sense_uproc_interrupt_reg32 = 0x00290,
	.set_uproc_interrupt_reg = 0x00290,
	.set_uproc_interrupt_reg32 = 0x00290,
	.clr_uproc_interrupt_reg = 0x00294,
	.clr_uproc_interrupt_reg32 = 0x00294
	/* NOTE(review): the opening line of the third entry (the SIS-64 /
	 * CRoC family, with distinct 32-bit sub-register offsets and the
	 * extra dump/endian-swap registers) appears to be missing here. */
	.cache_line_size = 0x20,
	.set_interrupt_mask_reg = 0x00010,
	.clr_interrupt_mask_reg = 0x00018,
	.clr_interrupt_mask_reg32 = 0x0001C,
	.sense_interrupt_mask_reg = 0x00010,
	.sense_interrupt_mask_reg32 = 0x00014,
	.clr_interrupt_reg = 0x00008,
	.clr_interrupt_reg32 = 0x0000C,
	.sense_interrupt_reg = 0x00000,
	.sense_interrupt_reg32 = 0x00004,
	.ioarrin_reg = 0x00070,
	.sense_uproc_interrupt_reg = 0x00020,
	.sense_uproc_interrupt_reg32 = 0x00024,
	.set_uproc_interrupt_reg = 0x00020,
	.set_uproc_interrupt_reg32 = 0x00024,
	.clr_uproc_interrupt_reg = 0x00028,
	.clr_uproc_interrupt_reg32 = 0x0002C,
	.init_feedback_reg = 0x0005C,
	.dump_addr_reg = 0x00064,
	.dump_data_reg = 0x00068,
	.endian_swap_reg = 0x00084
188 static const struct ipr_chip_t ipr_chip[] = {
189 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
194 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
197 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
198 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
201 static int ipr_max_bus_speeds[] = {
202 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
/* Module metadata and user-tunable parameter registration */
/*
 * NOTE(review): several parameters are registered here as 'int' although
 * the backing variables above are 'unsigned int' (fastfail, transop_timeout,
 * debug, dual_ioa_raid, max_devs, number_of_msix, fast_reboot) — verify
 * the moduleparam type matches the variable type.
 */
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
231 /* A constant array of IOASCs/URCs/Error Messages */
233 struct ipr_error_table_t ipr_error_table[] = {
234 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
235 "8155: An unknown error was received"},
237 "Soft underlength error"},
239 "Command to be cancelled not found"},
241 "Qualified success"},
242 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
243 "FFFE: Soft device bus error recovered by the IOA"},
244 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
245 "4101: Soft device bus fabric error"},
246 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
247 "FFFC: Logical block guard error recovered by the device"},
248 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
249 "FFFC: Logical block reference tag error recovered by the device"},
250 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
251 "4171: Recovered scatter list tag / sequence number error"},
252 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
253 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
254 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
255 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
256 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
257 "FFFD: Recovered logical block reference tag error detected by the IOA"},
258 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
259 "FFFD: Logical block guard error recovered by the IOA"},
260 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
261 "FFF9: Device sector reassign successful"},
262 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
263 "FFF7: Media error recovered by device rewrite procedures"},
264 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
265 "7001: IOA sector reassignment successful"},
266 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
267 "FFF9: Soft media error. Sector reassignment recommended"},
268 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
269 "FFF7: Media error recovered by IOA rewrite procedures"},
270 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
271 "FF3D: Soft PCI bus error recovered by the IOA"},
272 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
273 "FFF6: Device hardware error recovered by the IOA"},
274 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
275 "FFF6: Device hardware error recovered by the device"},
276 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
277 "FF3D: Soft IOA error recovered by the IOA"},
278 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
279 "FFFA: Undefined device response recovered by the IOA"},
280 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
281 "FFF6: Device bus error, message or command phase"},
282 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
283 "FFFE: Task Management Function failed"},
284 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
285 "FFF6: Failure prediction threshold exceeded"},
286 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
287 "8009: Impending cache battery pack failure"},
289 "Logical Unit in process of becoming ready"},
291 "Initializing command required"},
293 "34FF: Disk device format in progress"},
295 "Logical unit not accessible, target port in unavailable state"},
296 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
297 "9070: IOA requested reset"},
299 "Synchronization required"},
301 "IOA microcode download required"},
303 "Device bus connection is prohibited by host"},
305 "No ready, IOA shutdown"},
307 "Not ready, IOA has been shutdown"},
308 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
309 "3020: Storage subsystem configuration error"},
311 "FFF5: Medium error, data unreadable, recommend reassign"},
313 "7000: Medium error, data unreadable, do not reassign"},
314 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
315 "FFF3: Disk media format bad"},
316 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
317 "3002: Addressed device failed to respond to selection"},
318 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
319 "3100: Device bus error"},
320 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
321 "3109: IOA timed out a device command"},
323 "3120: SCSI bus is not operational"},
324 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
325 "4100: Hard device bus fabric error"},
326 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
327 "310C: Logical block guard error detected by the device"},
328 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
329 "310C: Logical block reference tag error detected by the device"},
330 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
331 "4170: Scatter list tag / sequence number error"},
332 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
333 "8150: Logical block CRC error on IOA to Host transfer"},
334 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
335 "4170: Logical block sequence number error on IOA to Host transfer"},
336 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
337 "310D: Logical block reference tag error detected by the IOA"},
338 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
339 "310D: Logical block guard error detected by the IOA"},
340 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
341 "9000: IOA reserved area data check"},
342 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
343 "9001: IOA reserved area invalid data pattern"},
344 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
345 "9002: IOA reserved area LRC error"},
346 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
347 "Hardware Error, IOA metadata access error"},
348 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
349 "102E: Out of alternate sectors for disk storage"},
350 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
351 "FFF4: Data transfer underlength error"},
352 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
353 "FFF4: Data transfer overlength error"},
354 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
355 "3400: Logical unit failure"},
356 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
357 "FFF4: Device microcode is corrupt"},
358 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
359 "8150: PCI bus error"},
361 "Unsupported device bus message received"},
362 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
363 "FFF4: Disk device problem"},
364 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
365 "8150: Permanent IOA failure"},
366 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
367 "3010: Disk device returned wrong response to IOA"},
368 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
369 "8151: IOA microcode error"},
371 "Device bus status error"},
372 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
373 "8157: IOA error requiring IOA reset to recover"},
375 "ATA device status error"},
377 "Message reject received from the device"},
378 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
379 "8008: A permanent cache battery pack failure occurred"},
380 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
381 "9090: Disk unit has been modified after the last known status"},
382 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
383 "9081: IOA detected device error"},
384 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
385 "9082: IOA detected device error"},
386 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
387 "3110: Device bus error, message or command phase"},
388 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
389 "3110: SAS Command / Task Management Function failed"},
390 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
391 "9091: Incorrect hardware configuration change has been detected"},
392 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
393 "9073: Invalid multi-adapter configuration"},
394 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
395 "4010: Incorrect connection between cascaded expanders"},
396 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
397 "4020: Connections exceed IOA design limits"},
398 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
399 "4030: Incorrect multipath connection"},
400 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
401 "4110: Unsupported enclosure function"},
402 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
403 "4120: SAS cable VPD cannot be read"},
404 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
405 "FFF4: Command to logical unit failed"},
407 "Illegal request, invalid request type or request packet"},
409 "Illegal request, invalid resource handle"},
411 "Illegal request, commands not allowed to this device"},
413 "Illegal request, command not allowed to a secondary adapter"},
415 "Illegal request, command not allowed to a non-optimized resource"},
417 "Illegal request, invalid field in parameter list"},
419 "Illegal request, parameter not supported"},
421 "Illegal request, parameter value invalid"},
423 "Illegal request, command sequence error"},
425 "Illegal request, dual adapter support not enabled"},
427 "Illegal request, another cable connector was physically disabled"},
429 "Illegal request, inconsistent group id/group count"},
430 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
431 "9031: Array protection temporarily suspended, protection resuming"},
432 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
433 "9040: Array protection temporarily suspended, protection resuming"},
434 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
435 "4080: IOA exceeded maximum operating temperature"},
436 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
437 "4085: Service required"},
438 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
439 "3140: Device bus not ready to ready transition"},
440 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
441 "FFFB: SCSI bus was reset"},
443 "FFFE: SCSI bus transition to single ended"},
445 "FFFE: SCSI bus transition to LVD"},
446 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
447 "FFFB: SCSI bus was reset by another initiator"},
448 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
449 "3029: A device replacement has occurred"},
450 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
451 "4102: Device bus fabric performance degradation"},
452 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
453 "9051: IOA cache data exists for a missing or failed device"},
454 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
455 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
456 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
457 "9025: Disk unit is not supported at its physical location"},
458 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
459 "3020: IOA detected a SCSI bus configuration error"},
460 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
461 "3150: SCSI bus configuration error"},
462 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
463 "9074: Asymmetric advanced function disk configuration"},
464 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
465 "4040: Incomplete multipath connection between IOA and enclosure"},
466 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
467 "4041: Incomplete multipath connection between enclosure and device"},
468 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
469 "9075: Incomplete multipath connection between IOA and remote IOA"},
470 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
471 "9076: Configuration error, missing remote IOA"},
472 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
473 "4050: Enclosure does not support a required multipath function"},
474 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
475 "4121: Configuration error, required cable is missing"},
476 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
477 "4122: Cable is not plugged into the correct location on remote IOA"},
478 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
479 "4123: Configuration error, invalid cable vital product data"},
480 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
481 "4124: Configuration error, both cable ends are plugged into the same IOA"},
482 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
483 "4070: Logically bad block written on device"},
484 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
485 "9041: Array protection temporarily suspended"},
486 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
487 "9042: Corrupt array parity detected on specified device"},
488 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
489 "9030: Array no longer protected due to missing or failed disk unit"},
490 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
491 "9071: Link operational transition"},
492 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
493 "9072: Link not operational transition"},
494 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
495 "9032: Array exposed but still protected"},
496 {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
497 "70DD: Device forced failed by disrupt device command"},
498 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
499 "4061: Multipath redundancy level got better"},
500 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
501 "4060: Multipath redundancy level got worse"},
502 {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
503 "9083: Device raw mode enabled"},
504 {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
505 "9084: Device raw mode disabled"},
507 "Failure due to other device"},
508 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
509 "9008: IOA does not support functions expected by devices"},
510 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
511 "9010: Cache data associated with attached devices cannot be found"},
512 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
513 "9011: Cache data belongs to devices other than those attached"},
514 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
515 "9020: Array missing 2 or more devices with only 1 device present"},
516 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
517 "9021: Array missing 2 or more devices with 2 or more devices present"},
518 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
519 "9022: Exposed array is missing a required device"},
520 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
521 "9023: Array member(s) not at required physical locations"},
522 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
523 "9024: Array not functional due to present hardware configuration"},
524 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
525 "9026: Array not functional due to present hardware configuration"},
526 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
527 "9027: Array is missing a device and parity is out of sync"},
528 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
529 "9028: Maximum number of arrays already exist"},
530 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
531 "9050: Required cache data cannot be located for a disk unit"},
532 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
533 "9052: Cache data exists for a device that has been modified"},
534 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
535 "9054: IOA resources not available due to previous problems"},
536 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
537 "9092: Disk unit requires initialization before use"},
538 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
539 "9029: Incorrect hardware configuration change has been detected"},
540 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
541 "9060: One or more disk pairs are missing from an array"},
542 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
543 "9061: One or more disks are missing from an array"},
544 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
545 "9062: One or more disks are missing from an array"},
546 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
547 "9063: Maximum number of functional arrays has been exceeded"},
549 "Data protect, other volume set problem"},
551 "Aborted command, invalid descriptor"},
553 "Target operating conditions have changed, dual adapter takeover"},
555 "Aborted command, medium removal prevented"},
557 "Command terminated by host"},
559 "Aborted command, command terminated by host"}
562 static const struct ipr_ses_table_entry ipr_ses_table[] = {
563 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
564 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
565 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
566 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
567 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
568 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
569 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
570 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
571 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
572 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
573 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
574 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
575 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
/*
 * Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
588 #ifdef CONFIG_SCSI_IPR_TRACE
590 * ipr_trc_hook - Add a trace entry to the driver trace
591 * @ipr_cmd: ipr command struct
593 * @add_data: additional data
598 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
599 u8 type, u32 add_data)
601 struct ipr_trace_entry *trace_entry;
602 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
603 unsigned int trace_index;
605 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
606 trace_entry = &ioa_cfg->trace[trace_index];
607 trace_entry->time = jiffies;
608 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
609 trace_entry->type = type;
610 if (ipr_cmd->ioa_cfg->sis64)
611 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
613 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
614 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
615 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
616 trace_entry->u.add_data = add_data;
620 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
624 * ipr_lock_and_done - Acquire lock and complete command
625 * @ipr_cmd: ipr command struct
630 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
632 unsigned long lock_flags;
633 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
635 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
636 ipr_cmd->done(ipr_cmd);
637 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
641 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
642 * @ipr_cmd: ipr command struct
647 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
649 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
650 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
651 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
652 dma_addr_t dma_addr = ipr_cmd->dma_addr;
655 hrrq_id = ioarcb->cmd_pkt.hrrq_id;
656 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
657 ioarcb->cmd_pkt.hrrq_id = hrrq_id;
658 ioarcb->data_transfer_length = 0;
659 ioarcb->read_data_transfer_length = 0;
660 ioarcb->ioadl_len = 0;
661 ioarcb->read_ioadl_len = 0;
663 if (ipr_cmd->ioa_cfg->sis64) {
664 ioarcb->u.sis64_addr_data.data_ioadl_addr =
665 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
666 ioasa64->u.gata.status = 0;
668 ioarcb->write_ioadl_addr =
669 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
670 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
671 ioasa->u.gata.status = 0;
674 ioasa->hdr.ioasc = 0;
675 ioasa->hdr.residual_data_len = 0;
676 ipr_cmd->scsi_cmd = NULL;
678 ipr_cmd->sense_buffer[0] = 0;
679 ipr_cmd->dma_use_sg = 0;
683 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
684 * @ipr_cmd: ipr command struct
689 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
690 void (*fast_done) (struct ipr_cmnd *))
692 ipr_reinit_ipr_cmnd(ipr_cmd);
693 ipr_cmd->u.scratch = 0;
694 ipr_cmd->sibling = NULL;
695 ipr_cmd->eh_comp = NULL;
696 ipr_cmd->fast_done = fast_done;
697 init_timer(&ipr_cmd->timer);
701 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
702 * @ioa_cfg: ioa config struct
705 * pointer to ipr command struct
708 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
710 struct ipr_cmnd *ipr_cmd = NULL;
712 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
713 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
714 struct ipr_cmnd, queue);
715 list_del(&ipr_cmd->queue);
723 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
724 * @ioa_cfg: ioa config struct
727 * pointer to ipr command struct
730 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
732 struct ipr_cmnd *ipr_cmd =
733 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
734 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
739 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
740 * @ioa_cfg: ioa config struct
741 * @clr_ints: interrupts to clear
743 * This function masks all interrupts on the adapter, then clears the
744 * interrupts specified in the mask
749 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
752 volatile u32 int_reg;
755 /* Stop new interrupts */
756 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
757 spin_lock(&ioa_cfg->hrrq[i]._lock);
758 ioa_cfg->hrrq[i].allow_interrupts = 0;
759 spin_unlock(&ioa_cfg->hrrq[i]._lock);
763 /* Set interrupt mask to stop all new interrupts */
765 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
767 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
769 /* Clear any pending interrupts */
771 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
772 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
773 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
777 * ipr_save_pcix_cmd_reg - Save PCI-X command register
778 * @ioa_cfg: ioa config struct
781 * 0 on success / -EIO on failure
783 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
785 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
787 if (pcix_cmd_reg == 0)
790 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
791 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
792 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
796 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
801 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
802 * @ioa_cfg: ioa config struct
805 * 0 on success / -EIO on failure
807 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
809 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
812 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
813 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
814 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
823 * ipr_sata_eh_done - done function for aborted SATA commands
824 * @ipr_cmd: ipr command struct
826 * This function is invoked for ops generated to SATA
827 * devices which are being aborted.
832 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
834 struct ata_queued_cmd *qc = ipr_cmd->qc;
835 struct ipr_sata_port *sata_port = qc->ap->private_data;
837 qc->err_mask |= AC_ERR_OTHER;
838 sata_port->ioasa.status |= ATA_BUSY;
840 if (ipr_cmd->eh_comp)
841 complete(ipr_cmd->eh_comp);
842 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
846 * ipr_scsi_eh_done - mid-layer done function for aborted ops
847 * @ipr_cmd: ipr command struct
849 * This function is invoked by the interrupt handler for
850 * ops generated by the SCSI mid-layer which are being aborted.
855 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
857 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
859 scsi_cmd->result |= (DID_ERROR << 16);
861 scsi_dma_unmap(ipr_cmd->scsi_cmd);
862 scsi_cmd->scsi_done(scsi_cmd);
863 if (ipr_cmd->eh_comp)
864 complete(ipr_cmd->eh_comp);
865 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
869 * ipr_fail_all_ops - Fails all outstanding ops.
870 * @ioa_cfg: ioa config struct
872 * This function fails all outstanding ops.
877 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
879 struct ipr_cmnd *ipr_cmd, *temp;
880 struct ipr_hrr_queue *hrrq;
883 for_each_hrrq(hrrq, ioa_cfg) {
884 spin_lock(&hrrq->_lock);
885 list_for_each_entry_safe(ipr_cmd,
886 temp, &hrrq->hrrq_pending_q, queue) {
887 list_del(&ipr_cmd->queue);
889 ipr_cmd->s.ioasa.hdr.ioasc =
890 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
891 ipr_cmd->s.ioasa.hdr.ilid =
892 cpu_to_be32(IPR_DRIVER_ILID);
894 if (ipr_cmd->scsi_cmd)
895 ipr_cmd->done = ipr_scsi_eh_done;
896 else if (ipr_cmd->qc)
897 ipr_cmd->done = ipr_sata_eh_done;
899 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
900 IPR_IOASC_IOA_WAS_RESET);
901 del_timer(&ipr_cmd->timer);
902 ipr_cmd->done(ipr_cmd);
904 spin_unlock(&hrrq->_lock);
910 * ipr_send_command - Send driver initiated requests.
911 * @ipr_cmd: ipr command struct
913 * This function sends a command to the adapter using the correct write call.
914 * In the case of sis64, calculate the ioarcb size required. Then or in the
920 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
922 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
923 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
925 if (ioa_cfg->sis64) {
926 /* The default size is 256 bytes */
927 send_dma_addr |= 0x1;
929 /* If the number of ioadls * size of ioadl > 128 bytes,
930 then use a 512 byte ioarcb */
931 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
932 send_dma_addr |= 0x4;
933 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
935 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
939 * ipr_do_req - Send driver initiated requests.
940 * @ipr_cmd: ipr command struct
941 * @done: done function
942 * @timeout_func: timeout function
943 * @timeout: timeout value
945 * This function sends the specified command to the adapter with the
946 * timeout given. The done function is invoked on command completion.
951 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
952 void (*done) (struct ipr_cmnd *),
953 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
955 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
957 ipr_cmd->done = done;
959 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
960 ipr_cmd->timer.expires = jiffies + timeout;
961 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
963 add_timer(&ipr_cmd->timer);
965 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
967 ipr_send_command(ipr_cmd);
971 * ipr_internal_cmd_done - Op done function for an internally generated op.
972 * @ipr_cmd: ipr command struct
974 * This function is the op done function for an internally generated,
975 * blocking op. It simply wakes the sleeping thread.
980 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
982 if (ipr_cmd->sibling)
983 ipr_cmd->sibling = NULL;
985 complete(&ipr_cmd->completion);
989 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
990 * @ipr_cmd: ipr command struct
991 * @dma_addr: dma address
992 * @len: transfer length
993 * @flags: ioadl flag value
995 * This function initializes an ioadl in the case where there is only a single
1001 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
1004 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1005 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
1007 ipr_cmd->dma_use_sg = 1;
1009 if (ipr_cmd->ioa_cfg->sis64) {
1010 ioadl64->flags = cpu_to_be32(flags);
1011 ioadl64->data_len = cpu_to_be32(len);
1012 ioadl64->address = cpu_to_be64(dma_addr);
1014 ipr_cmd->ioarcb.ioadl_len =
1015 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1016 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1018 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1019 ioadl->address = cpu_to_be32(dma_addr);
1021 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1022 ipr_cmd->ioarcb.read_ioadl_len =
1023 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1024 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1026 ipr_cmd->ioarcb.ioadl_len =
1027 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1028 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1034 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1035 * @ipr_cmd: ipr command struct
1036 * @timeout_func: function to invoke if command times out
1042 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1043 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1046 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1048 init_completion(&ipr_cmd->completion);
1049 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1051 spin_unlock_irq(ioa_cfg->host->host_lock);
1052 wait_for_completion(&ipr_cmd->completion);
1053 spin_lock_irq(ioa_cfg->host->host_lock);
1056 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1060 if (ioa_cfg->hrrq_num == 1)
1063 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1064 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1070 * ipr_send_hcam - Send an HCAM to the adapter.
1071 * @ioa_cfg: ioa config struct
1073 * @hostrcb: hostrcb struct
1075 * This function will send a Host Controlled Async command to the adapter.
1076 * If HCAMs are currently not allowed to be issued to the adapter, it will
1077 * place the hostrcb on the free queue.
1082 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1083 struct ipr_hostrcb *hostrcb)
1085 struct ipr_cmnd *ipr_cmd;
1086 struct ipr_ioarcb *ioarcb;
1088 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1089 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1090 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1091 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1093 ipr_cmd->u.hostrcb = hostrcb;
1094 ioarcb = &ipr_cmd->ioarcb;
1096 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1097 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1098 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1099 ioarcb->cmd_pkt.cdb[1] = type;
1100 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1101 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1103 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1104 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1106 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1107 ipr_cmd->done = ipr_process_ccn;
1109 ipr_cmd->done = ipr_process_error;
1111 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1113 ipr_send_command(ipr_cmd);
1115 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1120 * ipr_update_ata_class - Update the ata class in the resource entry
1121 * @res: resource entry struct
1122 * @proto: cfgte device bus protocol value
1127 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1130 case IPR_PROTO_SATA:
1131 case IPR_PROTO_SAS_STP:
1132 res->ata_class = ATA_DEV_ATA;
1134 case IPR_PROTO_SATA_ATAPI:
1135 case IPR_PROTO_SAS_STP_ATAPI:
1136 res->ata_class = ATA_DEV_ATAPI;
1139 res->ata_class = ATA_DEV_UNKNOWN;
1145 * ipr_init_res_entry - Initialize a resource entry struct.
1146 * @res: resource entry struct
1147 * @cfgtew: config table entry wrapper struct
1152 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1153 struct ipr_config_table_entry_wrapper *cfgtew)
1157 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1158 struct ipr_resource_entry *gscsi_res = NULL;
1160 res->needs_sync_complete = 0;
1163 res->del_from_ml = 0;
1164 res->resetting_device = 0;
1165 res->reset_occurred = 0;
1167 res->sata_port = NULL;
1169 if (ioa_cfg->sis64) {
1170 proto = cfgtew->u.cfgte64->proto;
1171 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1172 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1173 res->qmodel = IPR_QUEUEING_MODEL64(res);
1174 res->type = cfgtew->u.cfgte64->res_type;
1176 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1177 sizeof(res->res_path));
1180 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1181 sizeof(res->dev_lun.scsi_lun));
1182 res->lun = scsilun_to_int(&res->dev_lun);
1184 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1185 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1186 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1188 res->target = gscsi_res->target;
1193 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1194 ioa_cfg->max_devs_supported);
1195 set_bit(res->target, ioa_cfg->target_ids);
1197 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1198 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1200 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1201 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1202 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1203 ioa_cfg->max_devs_supported);
1204 set_bit(res->target, ioa_cfg->array_ids);
1205 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1206 res->bus = IPR_VSET_VIRTUAL_BUS;
1207 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1208 ioa_cfg->max_devs_supported);
1209 set_bit(res->target, ioa_cfg->vset_ids);
1211 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1212 ioa_cfg->max_devs_supported);
1213 set_bit(res->target, ioa_cfg->target_ids);
1216 proto = cfgtew->u.cfgte->proto;
1217 res->qmodel = IPR_QUEUEING_MODEL(res);
1218 res->flags = cfgtew->u.cfgte->flags;
1219 if (res->flags & IPR_IS_IOA_RESOURCE)
1220 res->type = IPR_RES_TYPE_IOAFP;
1222 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1224 res->bus = cfgtew->u.cfgte->res_addr.bus;
1225 res->target = cfgtew->u.cfgte->res_addr.target;
1226 res->lun = cfgtew->u.cfgte->res_addr.lun;
1227 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1230 ipr_update_ata_class(res, proto);
1234 * ipr_is_same_device - Determine if two devices are the same.
1235 * @res: resource entry struct
1236 * @cfgtew: config table entry wrapper struct
1239 * 1 if the devices are the same / 0 otherwise
1241 static int ipr_is_same_device(struct ipr_resource_entry *res,
1242 struct ipr_config_table_entry_wrapper *cfgtew)
1244 if (res->ioa_cfg->sis64) {
1245 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1246 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1247 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1248 sizeof(cfgtew->u.cfgte64->lun))) {
1252 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1253 res->target == cfgtew->u.cfgte->res_addr.target &&
1254 res->lun == cfgtew->u.cfgte->res_addr.lun)
1262 * __ipr_format_res_path - Format the resource path for printing.
1263 * @res_path: resource path
1265 * @len: length of buffer provided
1270 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1276 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1277 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1278 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1284 * ipr_format_res_path - Format the resource path for printing.
1285 * @ioa_cfg: ioa config struct
1286 * @res_path: resource path
1288 * @len: length of buffer provided
1293 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1294 u8 *res_path, char *buffer, int len)
1299 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1300 __ipr_format_res_path(res_path, p, len - (buffer - p));
1305 * ipr_update_res_entry - Update the resource entry.
1306 * @res: resource entry struct
1307 * @cfgtew: config table entry wrapper struct
1312 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1313 struct ipr_config_table_entry_wrapper *cfgtew)
1315 char buffer[IPR_MAX_RES_PATH_LENGTH];
1319 if (res->ioa_cfg->sis64) {
1320 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1321 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1322 res->type = cfgtew->u.cfgte64->res_type;
1324 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1325 sizeof(struct ipr_std_inq_data));
1327 res->qmodel = IPR_QUEUEING_MODEL64(res);
1328 proto = cfgtew->u.cfgte64->proto;
1329 res->res_handle = cfgtew->u.cfgte64->res_handle;
1330 res->dev_id = cfgtew->u.cfgte64->dev_id;
1332 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1333 sizeof(res->dev_lun.scsi_lun));
1335 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1336 sizeof(res->res_path))) {
1337 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1338 sizeof(res->res_path));
1342 if (res->sdev && new_path)
1343 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1344 ipr_format_res_path(res->ioa_cfg,
1345 res->res_path, buffer, sizeof(buffer)));
1347 res->flags = cfgtew->u.cfgte->flags;
1348 if (res->flags & IPR_IS_IOA_RESOURCE)
1349 res->type = IPR_RES_TYPE_IOAFP;
1351 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1353 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1354 sizeof(struct ipr_std_inq_data));
1356 res->qmodel = IPR_QUEUEING_MODEL(res);
1357 proto = cfgtew->u.cfgte->proto;
1358 res->res_handle = cfgtew->u.cfgte->res_handle;
1361 ipr_update_ata_class(res, proto);
1365 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1367 * @res: resource entry struct
1368 * @cfgtew: config table entry wrapper struct
1373 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1375 struct ipr_resource_entry *gscsi_res = NULL;
1376 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1378 if (!ioa_cfg->sis64)
1381 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1382 clear_bit(res->target, ioa_cfg->array_ids);
1383 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1384 clear_bit(res->target, ioa_cfg->vset_ids);
1385 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1386 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1387 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1389 clear_bit(res->target, ioa_cfg->target_ids);
1391 } else if (res->bus == 0)
1392 clear_bit(res->target, ioa_cfg->target_ids);
1396 * ipr_handle_config_change - Handle a config change from the adapter
1397 * @ioa_cfg: ioa config struct
1403 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1404 struct ipr_hostrcb *hostrcb)
1406 struct ipr_resource_entry *res = NULL;
1407 struct ipr_config_table_entry_wrapper cfgtew;
1408 __be32 cc_res_handle;
1412 if (ioa_cfg->sis64) {
1413 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1414 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1416 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1417 cc_res_handle = cfgtew.u.cfgte->res_handle;
1420 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1421 if (res->res_handle == cc_res_handle) {
1428 if (list_empty(&ioa_cfg->free_res_q)) {
1429 ipr_send_hcam(ioa_cfg,
1430 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1435 res = list_entry(ioa_cfg->free_res_q.next,
1436 struct ipr_resource_entry, queue);
1438 list_del(&res->queue);
1439 ipr_init_res_entry(res, &cfgtew);
1440 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1443 ipr_update_res_entry(res, &cfgtew);
1445 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1447 res->del_from_ml = 1;
1448 res->res_handle = IPR_INVALID_RES_HANDLE;
1449 schedule_work(&ioa_cfg->work_q);
1451 ipr_clear_res_target(res);
1452 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1454 } else if (!res->sdev || res->del_from_ml) {
1456 schedule_work(&ioa_cfg->work_q);
1459 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1463 * ipr_process_ccn - Op done function for a CCN.
1464 * @ipr_cmd: ipr command struct
1466 * This function is the op done function for a configuration
1467 * change notification host controlled async from the adapter.
1472 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1474 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1475 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1476 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1478 list_del_init(&hostrcb->queue);
1479 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1482 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1483 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1484 dev_err(&ioa_cfg->pdev->dev,
1485 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1487 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1489 ipr_handle_config_change(ioa_cfg, hostrcb);
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:		string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
1514 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1515 * @prefix: string to print at start of printk
1516 * @hostrcb: hostrcb pointer
1517 * @vpd: vendor/product id/sn struct
1522 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1523 struct ipr_vpd *vpd)
1525 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1528 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1529 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1531 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1532 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1534 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1535 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1537 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1541 * ipr_log_vpd - Log the passed VPD to the error log.
1542 * @vpd: vendor/product id/sn struct
1547 static void ipr_log_vpd(struct ipr_vpd *vpd)
1549 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1550 + IPR_SERIAL_NUM_LEN];
1552 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1553 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1555 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1556 ipr_err("Vendor/Product ID: %s\n", buffer);
1558 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1559 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1560 ipr_err(" Serial Number: %s\n", buffer);
1564 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1565 * @prefix: string to print at start of printk
1566 * @hostrcb: hostrcb pointer
1567 * @vpd: vendor/product id/sn/wwn struct
1572 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1573 struct ipr_ext_vpd *vpd)
1575 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1576 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1577 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1581 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1582 * @vpd: vendor/product id/sn/wwn struct
1587 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1589 ipr_log_vpd(&vpd->vpd);
1590 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1591 be32_to_cpu(vpd->wwid[1]));
1595 * ipr_log_enhanced_cache_error - Log a cache error.
1596 * @ioa_cfg: ioa config struct
1597 * @hostrcb: hostrcb struct
1602 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1603 struct ipr_hostrcb *hostrcb)
1605 struct ipr_hostrcb_type_12_error *error;
1608 error = &hostrcb->hcam.u.error64.u.type_12_error;
1610 error = &hostrcb->hcam.u.error.u.type_12_error;
1612 ipr_err("-----Current Configuration-----\n");
1613 ipr_err("Cache Directory Card Information:\n");
1614 ipr_log_ext_vpd(&error->ioa_vpd);
1615 ipr_err("Adapter Card Information:\n");
1616 ipr_log_ext_vpd(&error->cfc_vpd);
1618 ipr_err("-----Expected Configuration-----\n");
1619 ipr_err("Cache Directory Card Information:\n");
1620 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1621 ipr_err("Adapter Card Information:\n");
1622 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1624 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1625 be32_to_cpu(error->ioa_data[0]),
1626 be32_to_cpu(error->ioa_data[1]),
1627 be32_to_cpu(error->ioa_data[2]));
1631 * ipr_log_cache_error - Log a cache error.
1632 * @ioa_cfg: ioa config struct
1633 * @hostrcb: hostrcb struct
1638 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1639 struct ipr_hostrcb *hostrcb)
1641 struct ipr_hostrcb_type_02_error *error =
1642 &hostrcb->hcam.u.error.u.type_02_error;
1644 ipr_err("-----Current Configuration-----\n");
1645 ipr_err("Cache Directory Card Information:\n");
1646 ipr_log_vpd(&error->ioa_vpd);
1647 ipr_err("Adapter Card Information:\n");
1648 ipr_log_vpd(&error->cfc_vpd);
1650 ipr_err("-----Expected Configuration-----\n");
1651 ipr_err("Cache Directory Card Information:\n");
1652 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1653 ipr_err("Adapter Card Information:\n");
1654 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1656 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1657 be32_to_cpu(error->ioa_data[0]),
1658 be32_to_cpu(error->ioa_data[1]),
1659 be32_to_cpu(error->ioa_data[2]));
1663 * ipr_log_enhanced_config_error - Log a configuration error.
1664 * @ioa_cfg: ioa config struct
1665 * @hostrcb: hostrcb struct
1670 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1671 struct ipr_hostrcb *hostrcb)
1673 int errors_logged, i;
1674 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1675 struct ipr_hostrcb_type_13_error *error;
1677 error = &hostrcb->hcam.u.error.u.type_13_error;
1678 errors_logged = be32_to_cpu(error->errors_logged);
1680 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1681 be32_to_cpu(error->errors_detected), errors_logged);
1683 dev_entry = error->dev;
1685 for (i = 0; i < errors_logged; i++, dev_entry++) {
1688 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1689 ipr_log_ext_vpd(&dev_entry->vpd);
1691 ipr_err("-----New Device Information-----\n");
1692 ipr_log_ext_vpd(&dev_entry->new_vpd);
1694 ipr_err("Cache Directory Card Information:\n");
1695 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1697 ipr_err("Adapter Card Information:\n");
1698 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1703 * ipr_log_sis64_config_error - Log a device error.
1704 * @ioa_cfg: ioa config struct
1705 * @hostrcb: hostrcb struct
1710 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1711 struct ipr_hostrcb *hostrcb)
1713 int errors_logged, i;
1714 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1715 struct ipr_hostrcb_type_23_error *error;
1716 char buffer[IPR_MAX_RES_PATH_LENGTH];
1718 error = &hostrcb->hcam.u.error64.u.type_23_error;
1719 errors_logged = be32_to_cpu(error->errors_logged);
1721 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1722 be32_to_cpu(error->errors_detected), errors_logged);
1724 dev_entry = error->dev;
1726 for (i = 0; i < errors_logged; i++, dev_entry++) {
1729 ipr_err("Device %d : %s", i + 1,
1730 __ipr_format_res_path(dev_entry->res_path,
1731 buffer, sizeof(buffer)));
1732 ipr_log_ext_vpd(&dev_entry->vpd);
1734 ipr_err("-----New Device Information-----\n");
1735 ipr_log_ext_vpd(&dev_entry->new_vpd);
1737 ipr_err("Cache Directory Card Information:\n");
1738 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1740 ipr_err("Adapter Card Information:\n");
1741 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1746 * ipr_log_config_error - Log a configuration error.
1747 * @ioa_cfg: ioa config struct
1748 * @hostrcb: hostrcb struct
1753 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1754 struct ipr_hostrcb *hostrcb)
1756 int errors_logged, i;
1757 struct ipr_hostrcb_device_data_entry *dev_entry;
1758 struct ipr_hostrcb_type_03_error *error;
1760 error = &hostrcb->hcam.u.error.u.type_03_error;
1761 errors_logged = be32_to_cpu(error->errors_logged);
1763 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1764 be32_to_cpu(error->errors_detected), errors_logged);
1766 dev_entry = error->dev;
1768 for (i = 0; i < errors_logged; i++, dev_entry++) {
1771 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1772 ipr_log_vpd(&dev_entry->vpd);
1774 ipr_err("-----New Device Information-----\n");
1775 ipr_log_vpd(&dev_entry->new_vpd);
1777 ipr_err("Cache Directory Card Information:\n");
1778 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1780 ipr_err("Adapter Card Information:\n");
1781 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1783 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1784 be32_to_cpu(dev_entry->ioa_data[0]),
1785 be32_to_cpu(dev_entry->ioa_data[1]),
1786 be32_to_cpu(dev_entry->ioa_data[2]),
1787 be32_to_cpu(dev_entry->ioa_data[3]),
1788 be32_to_cpu(dev_entry->ioa_data[4]));
1793 * ipr_log_enhanced_array_error - Log an array configuration error.
1794 * @ioa_cfg: ioa config struct
1795 * @hostrcb: hostrcb struct
1800 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1801 struct ipr_hostrcb *hostrcb)
1804 struct ipr_hostrcb_type_14_error *error;
1805 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1806 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1808 error = &hostrcb->hcam.u.error.u.type_14_error;
1812 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1813 error->protection_level,
1814 ioa_cfg->host->host_no,
1815 error->last_func_vset_res_addr.bus,
1816 error->last_func_vset_res_addr.target,
1817 error->last_func_vset_res_addr.lun);
1821 array_entry = error->array_member;
1822 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1823 ARRAY_SIZE(error->array_member));
1825 for (i = 0; i < num_entries; i++, array_entry++) {
1826 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1829 if (be32_to_cpu(error->exposed_mode_adn) == i)
1830 ipr_err("Exposed Array Member %d:\n", i);
1832 ipr_err("Array Member %d:\n", i);
1834 ipr_log_ext_vpd(&array_entry->vpd);
1835 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1836 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1837 "Expected Location");
1844 * ipr_log_array_error - Log an array configuration error.
1845 * @ioa_cfg: ioa config struct
1846 * @hostrcb: hostrcb struct
1851 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1852 struct ipr_hostrcb *hostrcb)
1855 struct ipr_hostrcb_type_04_error *error;
1856 struct ipr_hostrcb_array_data_entry *array_entry;
1857 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1859 error = &hostrcb->hcam.u.error.u.type_04_error;
1863 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1864 error->protection_level,
1865 ioa_cfg->host->host_no,
1866 error->last_func_vset_res_addr.bus,
1867 error->last_func_vset_res_addr.target,
1868 error->last_func_vset_res_addr.lun);
1872 array_entry = error->array_member;
1874 for (i = 0; i < 18; i++) {
1875 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1878 if (be32_to_cpu(error->exposed_mode_adn) == i)
1879 ipr_err("Exposed Array Member %d:\n", i);
1881 ipr_err("Array Member %d:\n", i);
1883 ipr_log_vpd(&array_entry->vpd);
1885 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1886 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1887 "Expected Location");
1892 array_entry = error->array_member2;
1899 * ipr_log_hex_data - Log additional hex IOA error data.
1900 * @ioa_cfg: ioa config struct
1901 * @data: IOA error data
1907 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1914 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1915 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1917 for (i = 0; i < len / 4; i += 4) {
1918 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1919 be32_to_cpu(data[i]),
1920 be32_to_cpu(data[i+1]),
1921 be32_to_cpu(data[i+2]),
1922 be32_to_cpu(data[i+3]));
1927 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1928 * @ioa_cfg: ioa config struct
1929 * @hostrcb: hostrcb struct
1934 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1935 struct ipr_hostrcb *hostrcb)
1937 struct ipr_hostrcb_type_17_error *error;
1940 error = &hostrcb->hcam.u.error64.u.type_17_error;
1942 error = &hostrcb->hcam.u.error.u.type_17_error;
1944 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1945 strim(error->failure_reason);
1947 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1948 be32_to_cpu(hostrcb->hcam.u.error.prc));
1949 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1950 ipr_log_hex_data(ioa_cfg, error->data,
1951 be32_to_cpu(hostrcb->hcam.length) -
1952 (offsetof(struct ipr_hostrcb_error, u) +
1953 offsetof(struct ipr_hostrcb_type_17_error, data)));
1957 * ipr_log_dual_ioa_error - Log a dual adapter error.
1958 * @ioa_cfg: ioa config struct
1959 * @hostrcb: hostrcb struct
1964 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1965 struct ipr_hostrcb *hostrcb)
1967 struct ipr_hostrcb_type_07_error *error;
1969 error = &hostrcb->hcam.u.error.u.type_07_error;
1970 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1971 strim(error->failure_reason);
1973 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1974 be32_to_cpu(hostrcb->hcam.u.error.prc));
1975 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1976 ipr_log_hex_data(ioa_cfg, error->data,
1977 be32_to_cpu(hostrcb->hcam.length) -
1978 (offsetof(struct ipr_hostrcb_error, u) +
1979 offsetof(struct ipr_hostrcb_type_07_error, data)));
1982 static const struct {
1985 } path_active_desc[] = {
1986 { IPR_PATH_NO_INFO, "Path" },
1987 { IPR_PATH_ACTIVE, "Active path" },
1988 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1991 static const struct {
1994 } path_state_desc[] = {
1995 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1996 { IPR_PATH_HEALTHY, "is healthy" },
1997 { IPR_PATH_DEGRADED, "is degraded" },
1998 { IPR_PATH_FAILED, "is failed" }
2002 * ipr_log_fabric_path - Log a fabric path error
2003 * @hostrcb: hostrcb struct
2004 * @fabric: fabric descriptor
2009 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2010 struct ipr_hostrcb_fabric_desc *fabric)
2013 u8 path_state = fabric->path_state;
2014 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2015 u8 state = path_state & IPR_PATH_STATE_MASK;
2017 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2018 if (path_active_desc[i].active != active)
2021 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2022 if (path_state_desc[j].state != state)
2025 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2026 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2027 path_active_desc[i].desc, path_state_desc[j].desc,
2029 } else if (fabric->cascaded_expander == 0xff) {
2030 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2031 path_active_desc[i].desc, path_state_desc[j].desc,
2032 fabric->ioa_port, fabric->phy);
2033 } else if (fabric->phy == 0xff) {
2034 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2035 path_active_desc[i].desc, path_state_desc[j].desc,
2036 fabric->ioa_port, fabric->cascaded_expander);
2038 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2039 path_active_desc[i].desc, path_state_desc[j].desc,
2040 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2046 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2047 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2051 * ipr_log64_fabric_path - Log a fabric path error
2052 * @hostrcb: hostrcb struct
2053 * @fabric: fabric descriptor
2058 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2059 struct ipr_hostrcb64_fabric_desc *fabric)
2062 u8 path_state = fabric->path_state;
2063 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2064 u8 state = path_state & IPR_PATH_STATE_MASK;
2065 char buffer[IPR_MAX_RES_PATH_LENGTH];
2067 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2068 if (path_active_desc[i].active != active)
2071 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2072 if (path_state_desc[j].state != state)
2075 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2076 path_active_desc[i].desc, path_state_desc[j].desc,
2077 ipr_format_res_path(hostrcb->ioa_cfg,
2079 buffer, sizeof(buffer)));
2084 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2085 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2086 buffer, sizeof(buffer)));
2089 static const struct {
2092 } path_type_desc[] = {
2093 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2094 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2095 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2096 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2099 static const struct {
2102 } path_status_desc[] = {
2103 { IPR_PATH_CFG_NO_PROB, "Functional" },
2104 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2105 { IPR_PATH_CFG_FAILED, "Failed" },
2106 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2107 { IPR_PATH_NOT_DETECTED, "Missing" },
2108 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2111 static const char *link_rate[] = {
2114 "phy reset problem",
2131 * ipr_log_path_elem - Log a fabric path element.
2132 * @hostrcb: hostrcb struct
2133 * @cfg: fabric path element struct
/*
 * Decodes cfg->type_status into a type and a status, looks up printable
 * names for both, and logs a description of the element. A value of
 * 0xff in cascaded_expander or phy appears to act as a "not applicable"
 * sentinel and suppresses that field -- TODO confirm against adapter spec.
 * NOTE(review): several source lines (continues, braces, returns) are
 * elided in this excerpt.
 */
2138 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2139 struct ipr_hostrcb_config_element *cfg)
2142 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2143 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
/* Nothing useful to log for a non-existent path element. */
2145 if (type == IPR_PATH_CFG_NOT_EXIST)
/* Find printable names for the element type and status. */
2148 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2149 if (path_type_desc[i].type != type)
2152 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2153 if (path_status_desc[j].status != status)
/* IOA ports: always log phy, link rate and WWN. */
2156 if (type == IPR_PATH_CFG_IOA_PORT) {
2157 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2158 path_status_desc[j].desc, path_type_desc[i].desc,
2159 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2160 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/* Other element types: omit fields that are 0xff. */
2162 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2163 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2164 path_status_desc[j].desc, path_type_desc[i].desc,
2165 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2166 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2167 } else if (cfg->cascaded_expander == 0xff) {
2168 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2169 "WWN=%08X%08X\n", path_status_desc[j].desc,
2170 path_type_desc[i].desc, cfg->phy,
2171 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2172 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2173 } else if (cfg->phy == 0xff) {
2174 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2175 "WWN=%08X%08X\n", path_status_desc[j].desc,
2176 path_type_desc[i].desc, cfg->cascaded_expander,
2177 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2178 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2180 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2181 "WWN=%08X%08X\n", path_status_desc[j].desc,
2182 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2183 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2184 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/* Fallback: type/status not in the tables -- dump raw values. */
2191 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2192 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2193 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2194 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2198 * ipr_log64_path_elem - Log a fabric path element.
2199 * @hostrcb: hostrcb struct
2200 * @cfg: fabric path element struct
/*
 * SIS64 variant of ipr_log_path_elem: logs the element's resource path
 * (formatted via ipr_format_res_path) instead of cascade/phy numbers.
 * Only descriptors tagged IPR_DESCRIPTOR_SIS64 are logged.
 * NOTE(review): several source lines (continues, braces, returns) are
 * elided in this excerpt.
 */
2205 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2206 struct ipr_hostrcb64_config_element *cfg)
2209 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2210 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2211 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2212 char buffer[IPR_MAX_RES_PATH_LENGTH];
/* Skip non-existent elements and non-SIS64 descriptors. */
2214 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
/* Find printable names for the element type and status. */
2217 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2218 if (path_type_desc[i].type != type)
2221 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2222 if (path_status_desc[j].status != status)
2225 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2226 path_status_desc[j].desc, path_type_desc[i].desc,
2227 ipr_format_res_path(hostrcb->ioa_cfg,
2228 cfg->res_path, buffer, sizeof(buffer)),
2229 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2230 be32_to_cpu(cfg->wwid[0]),
2231 be32_to_cpu(cfg->wwid[1]));
/* Fallback: type/status not in the tables -- dump raw values. */
2235 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2236 "WWN=%08X%08X\n", cfg->type_status,
2237 ipr_format_res_path(hostrcb->ioa_cfg,
2238 cfg->res_path, buffer, sizeof(buffer)),
2239 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2240 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2244 * ipr_log_fabric_error - Log a fabric error.
2245 * @ioa_cfg: ioa config struct
2246 * @hostrcb: hostrcb struct
/*
 * Logs a type 20 (fabric) error: the failure reason string, then each
 * fabric descriptor with its path elements, then any trailing bytes as
 * hex. add_len tracks how much of the HCAM remains after the fixed
 * header so the trailing hex dump is correctly sized.
 */
2251 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2252 struct ipr_hostrcb *hostrcb)
2254 struct ipr_hostrcb_type_20_error *error;
2255 struct ipr_hostrcb_fabric_desc *fabric;
2256 struct ipr_hostrcb_config_element *cfg;
2259 error = &hostrcb->hcam.u.error.u.type_20_error;
/* Force NUL termination before printing the adapter-supplied string. */
2260 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2261 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2263 add_len = be32_to_cpu(hostrcb->hcam.length) -
2264 (offsetof(struct ipr_hostrcb_error, u) +
2265 offsetof(struct ipr_hostrcb_type_20_error, desc));
/* Walk the variable-length fabric descriptors and their elements. */
2267 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2268 ipr_log_fabric_path(hostrcb, fabric);
2269 for_each_fabric_cfg(fabric, cfg)
2270 ipr_log_path_elem(hostrcb, cfg);
2272 add_len -= be16_to_cpu(fabric->length);
2273 fabric = (struct ipr_hostrcb_fabric_desc *)
2274 ((unsigned long)fabric + be16_to_cpu(fabric->length));
/* Whatever follows the descriptors is dumped raw. */
2277 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2281 * ipr_log_sis64_array_error - Log a sis64 array error.
2282 * @ioa_cfg: ioa config struct
2283 * @hostrcb: hostrcb struct
/*
 * Logs a type 24 (SIS64 array) error: the RAID level and last known
 * resource path of the array, then per-member VPD plus current and
 * expected resource paths. Members with an all-'0' serial number are
 * treated as empty slots and skipped.
 * NOTE(review): some source lines (else branch braces, closing lines)
 * are elided in this excerpt.
 */
2288 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2289 struct ipr_hostrcb *hostrcb)
2292 struct ipr_hostrcb_type_24_error *error;
2293 struct ipr_hostrcb64_array_data_entry *array_entry;
2294 char buffer[IPR_MAX_RES_PATH_LENGTH];
/* Sentinel serial number used to detect unpopulated member slots. */
2295 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2297 error = &hostrcb->hcam.u.error64.u.type_24_error;
2301 ipr_err("RAID %s Array Configuration: %s\n",
2302 error->protection_level,
2303 ipr_format_res_path(ioa_cfg, error->last_res_path,
2304 buffer, sizeof(buffer)));
2308 array_entry = error->array_member;
/* Clamp to the array bound in case the adapter over-reports. */
2309 num_entries = min_t(u32, error->num_entries,
2310 ARRAY_SIZE(error->array_member));
2312 for (i = 0; i < num_entries; i++, array_entry++) {
/* Skip empty member slots. */
2314 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2317 if (error->exposed_mode_adn == i)
2318 ipr_err("Exposed Array Member %d:\n", i);
2320 ipr_err("Array Member %d:\n", i);
2322 ipr_err("Array Member %d:\n", i);
2323 ipr_log_ext_vpd(&array_entry->vpd);
2324 ipr_err("Current Location: %s\n",
2325 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2326 buffer, sizeof(buffer)));
2327 ipr_err("Expected Location: %s\n",
2328 ipr_format_res_path(ioa_cfg,
2329 array_entry->expected_res_path,
2330 buffer, sizeof(buffer)));
2337 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2338 * @ioa_cfg: ioa config struct
2339 * @hostrcb: hostrcb struct
/*
 * SIS64 counterpart of ipr_log_fabric_error: logs a type 30 error's
 * failure reason, each 64-bit fabric descriptor with its path elements,
 * and any trailing bytes as hex.
 */
2344 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2345 struct ipr_hostrcb *hostrcb)
2347 struct ipr_hostrcb_type_30_error *error;
2348 struct ipr_hostrcb64_fabric_desc *fabric;
2349 struct ipr_hostrcb64_config_element *cfg;
2352 error = &hostrcb->hcam.u.error64.u.type_30_error;
/* Force NUL termination before printing the adapter-supplied string. */
2354 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2355 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2357 add_len = be32_to_cpu(hostrcb->hcam.length) -
2358 (offsetof(struct ipr_hostrcb64_error, u) +
2359 offsetof(struct ipr_hostrcb_type_30_error, desc));
/* Walk the variable-length fabric descriptors and their elements. */
2361 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2362 ipr_log64_fabric_path(hostrcb, fabric);
2363 for_each_fabric_cfg(fabric, cfg)
2364 ipr_log64_path_elem(hostrcb, cfg);
2366 add_len -= be16_to_cpu(fabric->length);
2367 fabric = (struct ipr_hostrcb64_fabric_desc *)
2368 ((unsigned long)fabric + be16_to_cpu(fabric->length));
/* Whatever follows the descriptors is dumped raw. */
2371 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2375 * ipr_log_generic_error - Log an adapter error.
2376 * @ioa_cfg: ioa config struct
2377 * @hostrcb: hostrcb struct
/* Fallback logger: hex-dumps the entire raw HCAM buffer. */
2382 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2383 struct ipr_hostrcb *hostrcb)
2385 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2386 be32_to_cpu(hostrcb->hcam.length));
2390 * ipr_log_sis64_device_error - Log a sis64 device error.
2391 * @ioa_cfg: ioa config struct
2392 * @hostrcb: hostrcb struct
/*
 * Logs a type 21 (SIS64 device) error: failing device WWN and resource
 * path, problem descriptions, sense data, CDB, and additional IOA data.
 */
2397 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2398 struct ipr_hostrcb *hostrcb)
2400 struct ipr_hostrcb_type_21_error *error;
2401 char buffer[IPR_MAX_RES_PATH_LENGTH];
2403 error = &hostrcb->hcam.u.error64.u.type_21_error;
2405 ipr_err("-----Failing Device Information-----\n");
2406 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2407 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2408 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2409 ipr_err("Device Resource Path: %s\n",
2410 __ipr_format_res_path(error->res_path,
2411 buffer, sizeof(buffer)));
/* Force NUL termination before printing adapter-supplied strings. */
2412 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2413 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2414 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2415 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2416 ipr_err("SCSI Sense Data:\n");
2417 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2418 ipr_err("SCSI Command Descriptor Block: \n");
2419 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2421 ipr_err("Additional IOA Data:\n");
2422 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2426 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2429 * This function will return the index of into the ipr_error_table
2430 * for the specified IOASC. If the IOASC is not in the table,
2431 * 0 will be returned, which points to the entry used for unknown errors.
2434 * index into the ipr_error_table
2436 static u32 ipr_get_error(u32 ioasc)
/* Linear scan; table entry 0 is the catch-all for unknown IOASCs. */
2440 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2441 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2448 * ipr_handle_log_data - Log an adapter error.
2449 * @ioa_cfg: ioa config struct
2450 * @hostrcb: hostrcb struct
2452 * This function logs an adapter error to the system.
/*
 * Validates the HCAM, looks up the IOASC in ipr_error_table, applies
 * log-level filtering, then dispatches on the overlay id to the
 * matching overlay-specific logger.
 * NOTE(review): early returns, break statements and some branches are
 * elided in this excerpt.
 */
2457 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2458 struct ipr_hostrcb *hostrcb)
2462 struct ipr_hostrcb_type_21_error *error;
/* Only error log entries are handled here. */
2464 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2467 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2468 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
/* The IOASC lives in different unions for SIS64 vs SIS32 adapters. */
2471 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2473 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2475 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2476 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2477 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2478 scsi_report_bus_reset(ioa_cfg->host,
2479 hostrcb->hcam.u.error.fd_res_addr.bus);
2482 error_index = ipr_get_error(ioasc);
/* log_hcam == 0 means this IOASC is never logged. */
2484 if (!ipr_error_table[error_index].log_hcam)
/* Suppress expected ILLEGAL_REQUEST failures at default log level. */
2487 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2488 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2489 error = &hostrcb->hcam.u.error64.u.type_21_error;
2491 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2492 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2496 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error)
;
2498 /* Set indication we have logged an error */
2499 ioa_cfg->errors_logged++;
/* Below the table's required log level: no detailed dump. */
2501 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
/* Clamp a bogus adapter-reported length to the raw buffer size. */
2503 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2504 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
/* Dispatch to the overlay-specific logger. */
2506 switch (hostrcb->hcam.overlay_id) {
2507 case IPR_HOST_RCB_OVERLAY_ID_2:
2508 ipr_log_cache_error(ioa_cfg, hostrcb);
2510 case IPR_HOST_RCB_OVERLAY_ID_3:
2511 ipr_log_config_error(ioa_cfg, hostrcb);
2513 case IPR_HOST_RCB_OVERLAY_ID_4:
2514 case IPR_HOST_RCB_OVERLAY_ID_6:
2515 ipr_log_array_error(ioa_cfg, hostrcb);
2517 case IPR_HOST_RCB_OVERLAY_ID_7:
2518 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2520 case IPR_HOST_RCB_OVERLAY_ID_12:
2521 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2523 case IPR_HOST_RCB_OVERLAY_ID_13:
2524 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2526 case IPR_HOST_RCB_OVERLAY_ID_14:
2527 case IPR_HOST_RCB_OVERLAY_ID_16:
2528 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2530 case IPR_HOST_RCB_OVERLAY_ID_17:
2531 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2533 case IPR_HOST_RCB_OVERLAY_ID_20:
2534 ipr_log_fabric_error(ioa_cfg, hostrcb);
2536 case IPR_HOST_RCB_OVERLAY_ID_21:
2537 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2539 case IPR_HOST_RCB_OVERLAY_ID_23:
2540 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2542 case IPR_HOST_RCB_OVERLAY_ID_24:
2543 case IPR_HOST_RCB_OVERLAY_ID_26:
2544 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2546 case IPR_HOST_RCB_OVERLAY_ID_30:
2547 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2549 case IPR_HOST_RCB_OVERLAY_ID_1:
2550 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2552 ipr_log_generic_error(ioa_cfg, hostrcb);
/*
 * Take a hostrcb off the free queue, falling back to reclaiming one
 * from the report queue when the free queue is empty, and detach it.
 * NOTE(review): the return statement is elided in this excerpt.
 */
2557 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2559 struct ipr_hostrcb *hostrcb;
2561 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2562 struct ipr_hostrcb, queue);
/* Free queue exhausted: steal the oldest pending report buffer. */
2564 if (unlikely(!hostrcb)) {
2565 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2566 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2567 struct ipr_hostrcb, queue);
2570 list_del_init(&hostrcb->queue);
2575 * ipr_process_error - Op done function for an adapter error log.
2576 * @ipr_cmd: ipr command struct
2578 * This function is the op done function for an error log host
2579 * controlled async from the adapter. It will log the error and
2580 * send the HCAM back to the adapter.
2585 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2587 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2588 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2589 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* The failing-device IOASC lives in different unions for SIS64/SIS32. */
2593 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2595 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2597 list_del_init(&hostrcb->queue);
/* Return the command block to the free queue before logging. */
2598 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2601 ipr_handle_log_data(ioa_cfg, hostrcb);
/* Adapter asked for a reset via this error: initiate one. */
2602 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2603 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2604 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2605 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2606 dev_err(&ioa_cfg->pdev->dev,
2607 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
/* Queue the logged hostrcb for reporting and re-arm the HCAM. */
2610 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2611 schedule_work(&ioa_cfg->work_q);
2612 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2614 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2618 * ipr_timeout - An internally generated op has timed out.
2619 * @ipr_cmd: ipr command struct
2621 * This function blocks host requests and initiates an
/*
 * ... adapter reset (per the driver's pattern for timeouts). Runs with
 * the host lock held across the reset initiation.
 */
2627 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2629 unsigned long lock_flags = 0;
2630 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2633 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2635 ioa_cfg->errors_logged++;
2636 dev_err(&ioa_cfg->pdev->dev,
2637 "Adapter being reset due to command timeout.\n");
/* If a dump was requested, collect it as part of the reset. */
2639 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2640 ioa_cfg->sdt_state = GET_DUMP;
/* Avoid double-reset: only reset if not already reloading for us. */
2642 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2643 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2650 * ipr_oper_timeout - Adapter timed out transitioning to operational
2651 * @ipr_cmd: ipr command struct
2653 * This function blocks host requests and initiates an
/*
 * ... adapter reset. Unlike ipr_timeout, this path also bumps
 * reset_retries so a persistently failing bring-up eventually gives up.
 */
2659 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2661 unsigned long lock_flags = 0;
2662 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2665 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2667 ioa_cfg->errors_logged++;
2668 dev_err(&ioa_cfg->pdev->dev,
2669 "Adapter timed out transitioning to operational.\n");
/* If a dump was requested, collect it as part of the reset. */
2671 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2672 ioa_cfg->sdt_state = GET_DUMP;
2674 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2676 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2677 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2680 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2685 * ipr_find_ses_entry - Find matching SES in SES table
2686 * @res: resource entry struct of SES
2689 * pointer to SES table entry / NULL on failure
/*
 * Compares the resource's inquiry product id against each table entry;
 * an 'X' in compare_product_id_byte[] marks a position that must match
 * the table's product_id byte, other positions are wildcards -- TODO
 * confirm wildcard semantics; matching logic lines are elided here.
 */
2691 static const struct ipr_ses_table_entry *
2692 ipr_find_ses_entry(struct ipr_resource_entry *res)
2695 struct ipr_std_inq_vpids *vpids;
2696 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2698 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2699 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2700 if (ste->compare_product_id_byte[j] == 'X') {
2701 vpids = &res->std_inq_data.vpids;
2702 if (vpids->product_id[j] == ste->product_id[j])
/* All IPR_PROD_ID_LEN positions matched: this is our entry. */
2710 if (matches == IPR_PROD_ID_LEN)
2718 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2719 * @ioa_cfg: ioa config struct
2721 * @bus_width: bus width
2724 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2725 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2726 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2727 * max 160MHz = max 320MB/sec).
2729 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2731 struct ipr_resource_entry *res;
2732 const struct ipr_ses_table_entry *ste;
/* Start from the theoretical max for this width, then clamp by SES. */
2733 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2735 /* Loop through each config table entry in the config table buffer */
2736 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
/* Only SES (enclosure) devices on the requested bus matter here. */
2737 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2740 if (bus != res->bus)
2743 if (!(ste = ipr_find_ses_entry(res)))
/* Convert the table's MB/sec-style limit to 100KHz units per byte lane. */
2746 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2749 return max_xfer_rate;
2753 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2754 * @ioa_cfg: ioa config struct
2755 * @max_delay: max delay in micro-seconds to wait
2757 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2760 * 0 on success / other on failure
/*
 * NOTE(review): the delay-advance and return statements of the loop are
 * elided in this excerpt.
 */
2762 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2764 volatile u32 pcii_reg;
2767 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2768 while (delay < max_delay) {
2769 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2771 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2774 /* udelay cannot be used if delay is more than a few milliseconds */
2775 if ((delay / 1000) > MAX_UDELAY_MS)
2776 mdelay(delay / 1000);
2786 * ipr_get_sis64_dump_data_section - Dump IOA memory
2787 * @ioa_cfg: ioa config struct
2788 * @start_addr: adapter address to dump
2789 * @dest: destination kernel buffer
2790 * @length_in_words: length to dump in 4 byte words
/*
 * SIS64 dump path: for each word, write the adapter address to the dump
 * address register and read the value back through the dump data
 * register. NOTE(review): the dest increment and return are elided.
 */
2795 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2797 __be32 *dest, u32 length_in_words)
2801 for (i = 0; i < length_in_words; i++) {
2802 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2803 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2811 * ipr_get_ldump_data_section - Dump IOA memory
2812 * @ioa_cfg: ioa config struct
2813 * @start_addr: adapter address to dump
2814 * @dest: destination kernel buffer
2815 * @length_in_words: length to dump in 4 byte words
2818 * 0 on success / -EIO on failure
/*
 * Implements the LDUMP handshake with the IOA: raise reset/debug
 * alerts, wait for IODEBUG acks, and pull one word at a time through
 * the mailbox register. SIS64 adapters use the simpler register-based
 * path instead. NOTE(review): some error returns and loop-advance
 * lines are elided in this excerpt.
 */
2820 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2822 __be32 *dest, u32 length_in_words)
2824 volatile u32 temp_pcii_reg;
2828 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2829 dest, length_in_words);
2831 /* Write IOA interrupt reg starting LDUMP state */
2832 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2833 ioa_cfg->regs.set_uproc_interrupt_reg32);
2835 /* Wait for IO debug acknowledge */
2836 if (ipr_wait_iodbg_ack(ioa_cfg,
2837 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2838 dev_err(&ioa_cfg->pdev->dev,
2839 "IOA dump long data transfer timeout\n");
2843 /* Signal LDUMP interlocked - clear IO debug ack */
2844 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2845 ioa_cfg->regs.clr_interrupt_reg);
2847 /* Write Mailbox with starting address */
2848 writel(start_addr, ioa_cfg->ioa_mailbox);
2850 /* Signal address valid - clear IOA Reset alert */
2851 writel(IPR_UPROCI_RESET_ALERT,
2852 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2854 for (i = 0; i < length_in_words; i++) {
2855 /* Wait for IO debug acknowledge */
2856 if (ipr_wait_iodbg_ack(ioa_cfg,
2857 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2858 dev_err(&ioa_cfg->pdev->dev,
2859 "IOA dump short data transfer timeout\n");
2863 /* Read data from mailbox and increment destination pointer */
2864 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2867 /* For all but the last word of data, signal data received */
2868 if (i < (length_in_words - 1)) {
2869 /* Signal dump data received - Clear IO debug Ack */
2870 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2871 ioa_cfg->regs.clr_interrupt_reg);
2875 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2876 writel(IPR_UPROCI_RESET_ALERT,
2877 ioa_cfg->regs.set_uproc_interrupt_reg32);
2879 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2880 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2882 /* Signal dump data received - Clear IO debug Ack */
2883 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2884 ioa_cfg->regs.clr_interrupt_reg);
2886 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2887 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2889 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2891 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2901 #ifdef CONFIG_SCSI_IPR_DUMP
2903 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2904 * @ioa_cfg: ioa config struct
2905 * @pci_address: adapter address
2906 * @length: length of data to copy
2908 * Copy data from PCI adapter to kernel buffer.
2909 * Note: length MUST be a 4 byte multiple
2911 * 0 on success / other on failure
/*
 * Copies adapter memory page by page into dynamically allocated dump
 * pages, dropping and reacquiring the host lock around the slow
 * ldump transfer. NOTE(review): some error paths and loop-exit lines
 * are elided in this excerpt.
 */
2913 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2914 unsigned long pci_address, u32 length)
2916 int bytes_copied = 0;
2917 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2919 unsigned long lock_flags = 0;
2920 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2923 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2925 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2927 while (bytes_copied < length &&
2928 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
/* Need a fresh page when the current one is full (or none yet). */
2929 if (ioa_dump->page_offset >= PAGE_SIZE ||
2930 ioa_dump->page_offset == 0) {
2931 page = (__be32 *)__get_free_page(GFP_ATOMIC);
/* Allocation failure: return what was copied so far. */
2935 return bytes_copied;
2938 ioa_dump->page_offset = 0;
2939 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2940 ioa_dump->next_page_index++;
2942 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
/* Copy no more than what remains in the request and in this page. */
2944 rem_len = length - bytes_copied;
2945 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2946 cur_len = min(rem_len, rem_page_len);
2948 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Dump was aborted while we were copying: stop early. */
2949 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2952 rc = ipr_get_ldump_data_section(ioa_cfg,
2953 pci_address + bytes_copied,
2954 &page[ioa_dump->page_offset / 4],
2955 (cur_len / sizeof(u32)));
2957 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2960 ioa_dump->page_offset += cur_len;
2961 bytes_copied += cur_len;
2969 return bytes_copied;
2973 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2974 * @hdr: dump entry header struct
/*
 * Fills in the fields common to every dump entry header.
 * NOTE(review): one assignment line is elided in this excerpt.
 */
2979 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2981 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2983 hdr->offset = sizeof(*hdr);
2984 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2988 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2989 * @ioa_cfg: ioa config struct
2990 * @driver_dump: driver dump struct
/*
 * Adds a binary dump entry recording the adapter type and the running
 * firmware version (packed from the page 3 inquiry VPD fields).
 */
2995 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2996 struct ipr_driver_dump *driver_dump)
2998 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3000 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3001 driver_dump->ioa_type_entry.hdr.len =
3002 sizeof(struct ipr_dump_ioa_type_entry) -
3003 sizeof(struct ipr_dump_entry_header);
3004 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3005 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3006 driver_dump->ioa_type_entry.type = ioa_cfg->type;
/* Pack major/card/minor[0]/minor[1] into one 32-bit version word. */
3007 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3008 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3009 ucode_vpd->minor_release[1];
3010 driver_dump->hdr.num_entries++;
3014 * ipr_dump_version_data - Fill in the driver version in the dump.
3015 * @ioa_cfg: ioa config struct
3016 * @driver_dump: driver dump struct
/* Adds an ASCII dump entry holding the driver version string. */
3021 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3022 struct ipr_driver_dump *driver_dump)
3024 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3025 driver_dump->version_entry.hdr.len =
3026 sizeof(struct ipr_dump_version_entry) -
3027 sizeof(struct ipr_dump_entry_header);
3028 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3029 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3030 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3031 driver_dump->hdr.num_entries++;
3035 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3036 * @ioa_cfg: ioa config struct
3037 * @driver_dump: driver dump struct
/* Adds a binary dump entry containing a snapshot of the driver trace. */
3042 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3043 struct ipr_driver_dump *driver_dump)
3045 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3046 driver_dump->trace_entry.hdr.len =
3047 sizeof(struct ipr_dump_trace_entry) -
3048 sizeof(struct ipr_dump_entry_header);
3049 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3050 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3051 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3052 driver_dump->hdr.num_entries++;
3056 * ipr_dump_location_data - Fill in the IOA location in the dump.
3057 * @ioa_cfg: ioa config struct
3058 * @driver_dump: driver dump struct
/* Adds an ASCII dump entry holding the PCI device name (location). */
3063 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3064 struct ipr_driver_dump *driver_dump)
3066 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3067 driver_dump->location_entry.hdr.len =
3068 sizeof(struct ipr_dump_location_entry) -
3069 sizeof(struct ipr_dump_entry_header);
3070 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3071 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3072 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3073 driver_dump->hdr.num_entries++;
3077 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3078 * @ioa_cfg: ioa config struct
3079 * @dump: dump struct
/*
 * Builds the driver-side dump entries, then reads the adapter's Smart
 * Dump Table (SDT) and copies each valid SDT entry's adapter memory
 * into the IOA dump. Drops the host lock for the (slow) copy phase.
 * NOTE(review): early returns, else-branches and some closing lines
 * are elided in this excerpt.
 */
3084 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3086 unsigned long start_addr, sdt_word;
3087 unsigned long lock_flags = 0;
3088 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3089 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3090 u32 num_entries, max_num_entries, start_off, end_off;
3091 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3092 struct ipr_sdt *sdt;
3098 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* A dump is only taken in the READ_DUMP state. */
3100 if (ioa_cfg->sdt_state != READ_DUMP) {
3101 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* SIS64: give the adapter time to settle before reading the mailbox. */
3105 if (ioa_cfg->sis64) {
3106 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3107 ssleep(IPR_DUMP_DELAY_SECONDS);
3108 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Mailbox holds the adapter address of the SDT. */
3111 start_addr = readl(ioa_cfg->ioa_mailbox);
3113 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3114 dev_err(&ioa_cfg->pdev->dev,
3115 "Invalid dump table format: %lx\n", start_addr);
3116 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3120 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3122 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3124 /* Initialize the overall dump header */
3125 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3126 driver_dump->hdr.num_entries = 1;
3127 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3128 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3129 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3130 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3132 ipr_dump_version_data(ioa_cfg, driver_dump);
3133 ipr_dump_location_data(ioa_cfg, driver_dump);
3134 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3135 ipr_dump_trace_data(ioa_cfg, driver_dump);
3137 /* Update dump_header */
3138 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3140 /* IOA Dump entry */
3141 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3142 ioa_dump->hdr.len = 0;
3143 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3144 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3146 /* First entries in sdt are actually a list of dump addresses and
3147 lengths to gather the real dump data. sdt represents the pointer
3148 to the ioa generated dump table. Dump data will be extracted based
3149 on entries in this table */
3150 sdt = &ioa_dump->sdt;
3152 if (ioa_cfg->sis64) {
3153 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3154 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3156 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3157 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3160 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3161 (max_num_entries * sizeof(struct ipr_sdt_entry));
3162 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3163 bytes_to_copy / sizeof(__be32));
3165 /* Smart Dump table is ready to use and the first entry is valid */
3166 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3167 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3168 dev_err(&ioa_cfg->pdev->dev,
3169 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3170 rc, be32_to_cpu(sdt->hdr.state));
3171 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3172 ioa_cfg->sdt_state = DUMP_OBTAINED;
3173 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3177 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3179 if (num_entries > max_num_entries)
3180 num_entries = max_num_entries;
3182 /* Update dump length to the actual data to be copied */
3183 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3185 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3187 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3189 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3191 for (i = 0; i < num_entries; i++) {
/* Stop once the IOA dump reaches its size cap. */
3192 if (ioa_dump->hdr.len > max_dump_size) {
3193 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3197 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3198 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3200 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3202 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3203 end_off = be32_to_cpu(sdt->entry[i].end_token);
3205 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3206 bytes_to_copy = end_off - start_off;
/* Skip entries whose claimed size exceeds the dump cap. */
3211 if (bytes_to_copy > max_dump_size) {
3212 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3216 /* Copy data from adapter to driver buffers */
3217 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3220 ioa_dump->hdr.len += bytes_copied;
/* Short copy means the dump is only partially complete. */
3222 if (bytes_copied != bytes_to_copy) {
3223 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3230 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3232 /* Update dump_header */
3233 driver_dump->hdr.len += ioa_dump->hdr.len;
3235 ioa_cfg->sdt_state = DUMP_OBTAINED;
/* Stub used when CONFIG_SCSI_IPR_DUMP is not enabled. */
3240 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3244 * ipr_release_dump - Free adapter dump memory
3245 * @kref: kref struct
/*
 * kref release callback: detaches the dump from the ioa_cfg under the
 * host lock, then frees all dump pages and the page-pointer array.
 */
3250 static void ipr_release_dump(struct kref *kref)
3252 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3253 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3254 unsigned long lock_flags = 0;
3258 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3259 ioa_cfg->dump = NULL;
3260 ioa_cfg->sdt_state = INACTIVE;
3261 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3263 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3264 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3266 vfree(dump->ioa_dump.ioa_data);
3272 * ipr_worker_thread - Worker thread
3273 * @work: ioa config struct
3275 * Called at task level from a work thread. This function takes care
3276 * of adding and removing device from the mid-layer as configuration
3277 * changes are detected by the adapter.
/*
 * Also drives dump collection when sdt_state == READ_DUMP. The host
 * lock is dropped around all mid-layer calls (scsi_remove_device,
 * scsi_add_device) and around the dump copy.
 * NOTE(review): several lines (returns, restart labels, braces) are
 * elided in this excerpt.
 */
3282 static void ipr_worker_thread(struct work_struct *work)
3284 unsigned long lock_flags;
3285 struct ipr_resource_entry *res;
3286 struct scsi_device *sdev;
3287 struct ipr_dump *dump;
3288 struct ipr_ioa_cfg *ioa_cfg =
3289 container_of(work, struct ipr_ioa_cfg, work_q);
3290 u8 bus, target, lun;
3294 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Dump requested: take it first, holding a kref across the copy. */
3296 if (ioa_cfg->sdt_state == READ_DUMP) {
3297 dump = ioa_cfg->dump;
3299 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3302 kref_get(&dump->kref);
3303 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3304 ipr_get_ioa_dump(ioa_cfg, dump);
3305 kref_put(&dump->kref, ipr_release_dump);
3307 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3308 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3309 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3310 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Device add/remove is deferred until scanning is enabled ... */
3314 if (!ioa_cfg->scan_enabled) {
3315 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* ... and until the adapter is accepting commands. */
3322 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3323 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Pass 1: remove devices the adapter reported gone. */
3327 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3328 if (res->del_from_ml && res->sdev) {
3331 if (!scsi_device_get(sdev)) {
3332 if (!res->add_to_ml)
3333 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3335 res->del_from_ml = 0;
3336 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3337 scsi_remove_device(sdev);
3338 scsi_device_put(sdev);
3339 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Pass 2: add newly reported devices to the mid-layer. */
3346 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3347 if (res->add_to_ml) {
3349 target = res->target;
3352 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3353 scsi_add_device(ioa_cfg->host, bus, target, lun);
3354 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Notify userspace that the device configuration changed. */
3359 ioa_cfg->scan_done = 1;
3360 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3361 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3365 #ifdef CONFIG_SCSI_IPR_TRACE
3367 * ipr_read_trace - Dump the adapter trace
3368 * @filp: open sysfs file
3369 * @kobj: kobject struct
3370 * @bin_attr: bin_attribute struct
3373 * @count: buffer size
3376 * number of bytes printed to buffer
3378 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3379 struct bin_attribute *bin_attr,
3380 char *buf, loff_t off, size_t count)
3382 struct device *dev = container_of(kobj, struct device, kobj);
3383 struct Scsi_Host *shost = class_to_shost(dev);
3384 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3385 unsigned long lock_flags = 0;
3388 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3389 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3391 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3396 static struct bin_attribute ipr_trace_attr = {
3402 .read = ipr_read_trace,
3407 * ipr_show_fw_version - Show the firmware version
3408 * @dev: class device struct
3412 * number of bytes printed to buffer
3414 static ssize_t ipr_show_fw_version(struct device *dev,
3415 struct device_attribute *attr, char *buf)
3417 struct Scsi_Host *shost = class_to_shost(dev);
3418 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3419 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3420 unsigned long lock_flags = 0;
3423 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3424 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3425 ucode_vpd->major_release, ucode_vpd->card_type,
3426 ucode_vpd->minor_release[0],
3427 ucode_vpd->minor_release[1]);
3428 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3432 static struct device_attribute ipr_fw_version_attr = {
3434 .name = "fw_version",
3437 .show = ipr_show_fw_version,
3441 * ipr_show_log_level - Show the adapter's error logging level
3442 * @dev: class device struct
3446 * number of bytes printed to buffer
3448 static ssize_t ipr_show_log_level(struct device *dev,
3449 struct device_attribute *attr, char *buf)
3451 struct Scsi_Host *shost = class_to_shost(dev);
3452 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3453 unsigned long lock_flags = 0;
3456 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3457 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3458 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3463 * ipr_store_log_level - Change the adapter's error logging level
3464 * @dev: class device struct
3468 * number of bytes printed to buffer
3470 static ssize_t ipr_store_log_level(struct device *dev,
3471 struct device_attribute *attr,
3472 const char *buf, size_t count)
3474 struct Scsi_Host *shost = class_to_shost(dev);
3475 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3476 unsigned long lock_flags = 0;
3478 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3479 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3480 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3484 static struct device_attribute ipr_log_level_attr = {
3486 .name = "log_level",
3487 .mode = S_IRUGO | S_IWUSR,
3489 .show = ipr_show_log_level,
3490 .store = ipr_store_log_level
3494 * ipr_store_diagnostics - IOA Diagnostics interface
3495 * @dev: device struct
3497 * @count: buffer size
3499 * This function will reset the adapter and wait a reasonable
3500 * amount of time for any errors that the adapter might log.
3503 * count on success / other on failure
3505 static ssize_t ipr_store_diagnostics(struct device *dev,
3506 struct device_attribute *attr,
3507 const char *buf, size_t count)
3509 struct Scsi_Host *shost = class_to_shost(dev);
3510 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3511 unsigned long lock_flags = 0;
3514 if (!capable(CAP_SYS_ADMIN))
3517 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3518 while (ioa_cfg->in_reset_reload) {
3519 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3520 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3521 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3524 ioa_cfg->errors_logged = 0;
3525 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3527 if (ioa_cfg->in_reset_reload) {
3528 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3529 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3531 /* Wait for a second for any errors to be logged */
3534 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3538 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3539 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3541 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3546 static struct device_attribute ipr_diagnostics_attr = {
3548 .name = "run_diagnostics",
3551 .store = ipr_store_diagnostics
3555 * ipr_show_adapter_state - Show the adapter's state
3556 * @class_dev: device struct
3560 * number of bytes printed to buffer
3562 static ssize_t ipr_show_adapter_state(struct device *dev,
3563 struct device_attribute *attr, char *buf)
3565 struct Scsi_Host *shost = class_to_shost(dev);
3566 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3567 unsigned long lock_flags = 0;
3570 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3571 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3572 len = snprintf(buf, PAGE_SIZE, "offline\n");
3574 len = snprintf(buf, PAGE_SIZE, "online\n");
3575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3580 * ipr_store_adapter_state - Change adapter state
3581 * @dev: device struct
3583 * @count: buffer size
3585 * This function will change the adapter's state.
3588 * count on success / other on failure
3590 static ssize_t ipr_store_adapter_state(struct device *dev,
3591 struct device_attribute *attr,
3592 const char *buf, size_t count)
3594 struct Scsi_Host *shost = class_to_shost(dev);
3595 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3596 unsigned long lock_flags;
3597 int result = count, i;
3599 if (!capable(CAP_SYS_ADMIN))
3602 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3603 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3604 !strncmp(buf, "online", 6)) {
3605 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3606 spin_lock(&ioa_cfg->hrrq[i]._lock);
3607 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3608 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3611 ioa_cfg->reset_retries = 0;
3612 ioa_cfg->in_ioa_bringdown = 0;
3613 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3615 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3616 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3621 static struct device_attribute ipr_ioa_state_attr = {
3623 .name = "online_state",
3624 .mode = S_IRUGO | S_IWUSR,
3626 .show = ipr_show_adapter_state,
3627 .store = ipr_store_adapter_state
3631 * ipr_store_reset_adapter - Reset the adapter
3632 * @dev: device struct
3634 * @count: buffer size
3636 * This function will reset the adapter.
3639 * count on success / other on failure
3641 static ssize_t ipr_store_reset_adapter(struct device *dev,
3642 struct device_attribute *attr,
3643 const char *buf, size_t count)
3645 struct Scsi_Host *shost = class_to_shost(dev);
3646 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3647 unsigned long lock_flags;
3650 if (!capable(CAP_SYS_ADMIN))
3653 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3654 if (!ioa_cfg->in_reset_reload)
3655 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3656 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3657 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3662 static struct device_attribute ipr_ioa_reset_attr = {
3664 .name = "reset_host",
3667 .store = ipr_store_reset_adapter
3670 static int ipr_iopoll(struct irq_poll *iop, int budget);
3672 * ipr_show_iopoll_weight - Show ipr polling mode
3673 * @dev: class device struct
3677 * number of bytes printed to buffer
3679 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3680 struct device_attribute *attr, char *buf)
3682 struct Scsi_Host *shost = class_to_shost(dev);
3683 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3684 unsigned long lock_flags = 0;
3687 spin_lock_irqsave(shost->host_lock, lock_flags);
3688 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3689 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3695 * ipr_store_iopoll_weight - Change the adapter's polling mode
3696 * @dev: class device struct
3700 * number of bytes printed to buffer
3702 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3703 struct device_attribute *attr,
3704 const char *buf, size_t count)
3706 struct Scsi_Host *shost = class_to_shost(dev);
3707 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3708 unsigned long user_iopoll_weight;
3709 unsigned long lock_flags = 0;
3712 if (!ioa_cfg->sis64) {
3713 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3716 if (kstrtoul(buf, 10, &user_iopoll_weight))
3719 if (user_iopoll_weight > 256) {
3720 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
3724 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3725 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
3729 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3730 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3731 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3734 spin_lock_irqsave(shost->host_lock, lock_flags);
3735 ioa_cfg->iopoll_weight = user_iopoll_weight;
3736 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3737 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3738 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3739 ioa_cfg->iopoll_weight, ipr_iopoll);
3742 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3747 static struct device_attribute ipr_iopoll_weight_attr = {
3749 .name = "iopoll_weight",
3750 .mode = S_IRUGO | S_IWUSR,
3752 .show = ipr_show_iopoll_weight,
3753 .store = ipr_store_iopoll_weight
3757 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3758 * @buf_len: buffer length
3760 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3761 * list to use for microcode download
3764 * pointer to sglist / NULL on failure
3766 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3768 int sg_size, order, bsize_elem, num_elem, i, j;
3769 struct ipr_sglist *sglist;
3770 struct scatterlist *scatterlist;
3773 /* Get the minimum size per scatter/gather element */
3774 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3776 /* Get the actual size per element */
3777 order = get_order(sg_size);
3779 /* Determine the actual number of bytes per element */
3780 bsize_elem = PAGE_SIZE * (1 << order);
3782 /* Determine the actual number of sg entries needed */
3783 if (buf_len % bsize_elem)
3784 num_elem = (buf_len / bsize_elem) + 1;
3786 num_elem = buf_len / bsize_elem;
3788 /* Allocate a scatter/gather list for the DMA */
3789 sglist = kzalloc(sizeof(struct ipr_sglist) +
3790 (sizeof(struct scatterlist) * (num_elem - 1)),
3793 if (sglist == NULL) {
3798 scatterlist = sglist->scatterlist;
3799 sg_init_table(scatterlist, num_elem);
3801 sglist->order = order;
3802 sglist->num_sg = num_elem;
3804 /* Allocate a bunch of sg elements */
3805 for (i = 0; i < num_elem; i++) {
3806 page = alloc_pages(GFP_KERNEL, order);
3810 /* Free up what we already allocated */
3811 for (j = i - 1; j >= 0; j--)
3812 __free_pages(sg_page(&scatterlist[j]), order);
3817 sg_set_page(&scatterlist[i], page, 0, 0);
3824 * ipr_free_ucode_buffer - Frees a microcode download buffer
3825 * @p_dnld: scatter/gather list pointer
3827 * Free a DMA'able ucode download buffer previously allocated with
3828 * ipr_alloc_ucode_buffer
3833 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3837 for (i = 0; i < sglist->num_sg; i++)
3838 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3844 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3845 * @sglist: scatter/gather list pointer
3846 * @buffer: buffer pointer
3847 * @len: buffer length
3849 * Copy a microcode image from a user buffer into a buffer allocated by
3850 * ipr_alloc_ucode_buffer
3853 * 0 on success / other on failure
3855 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3856 u8 *buffer, u32 len)
3858 int bsize_elem, i, result = 0;
3859 struct scatterlist *scatterlist;
3862 /* Determine the actual number of bytes per element */
3863 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3865 scatterlist = sglist->scatterlist;
3867 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3868 struct page *page = sg_page(&scatterlist[i]);
3871 memcpy(kaddr, buffer, bsize_elem);
3874 scatterlist[i].length = bsize_elem;
3882 if (len % bsize_elem) {
3883 struct page *page = sg_page(&scatterlist[i]);
3886 memcpy(kaddr, buffer, len % bsize_elem);
3889 scatterlist[i].length = len % bsize_elem;
3892 sglist->buffer_len = len;
3897 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3898 * @ipr_cmd: ipr command struct
3899 * @sglist: scatter/gather list
3901 * Builds a microcode download IOA data list (IOADL).
3904 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3905 struct ipr_sglist *sglist)
3907 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3908 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3909 struct scatterlist *scatterlist = sglist->scatterlist;
3912 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3913 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3914 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3917 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3918 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3919 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3920 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3921 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3924 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3928 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3929 * @ipr_cmd: ipr command struct
3930 * @sglist: scatter/gather list
3932 * Builds a microcode download IOA data list (IOADL).
3935 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3936 struct ipr_sglist *sglist)
3938 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3939 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3940 struct scatterlist *scatterlist = sglist->scatterlist;
3943 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3944 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3945 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3948 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3950 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3951 ioadl[i].flags_and_data_len =
3952 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3954 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3957 ioadl[i-1].flags_and_data_len |=
3958 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3962 * ipr_update_ioa_ucode - Update IOA's microcode
3963 * @ioa_cfg: ioa config struct
3964 * @sglist: scatter/gather list
3966 * Initiate an adapter reset to update the IOA's microcode
3969 * 0 on success / -EIO on failure
3971 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3972 struct ipr_sglist *sglist)
3974 unsigned long lock_flags;
3976 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3977 while (ioa_cfg->in_reset_reload) {
3978 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3979 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3980 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3983 if (ioa_cfg->ucode_sglist) {
3984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3985 dev_err(&ioa_cfg->pdev->dev,
3986 "Microcode download already in progress\n");
3990 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3991 sglist->scatterlist, sglist->num_sg,
3994 if (!sglist->num_dma_sg) {
3995 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3996 dev_err(&ioa_cfg->pdev->dev,
3997 "Failed to map microcode download buffer!\n");
4001 ioa_cfg->ucode_sglist = sglist;
4002 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4003 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4004 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4006 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4007 ioa_cfg->ucode_sglist = NULL;
4008 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4013 * ipr_store_update_fw - Update the firmware on the adapter
4014 * @class_dev: device struct
4016 * @count: buffer size
4018 * This function will update the firmware on the adapter.
4021 * count on success / other on failure
4023 static ssize_t ipr_store_update_fw(struct device *dev,
4024 struct device_attribute *attr,
4025 const char *buf, size_t count)
4027 struct Scsi_Host *shost = class_to_shost(dev);
4028 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4029 struct ipr_ucode_image_header *image_hdr;
4030 const struct firmware *fw_entry;
4031 struct ipr_sglist *sglist;
4035 int result, dnld_size;
4037 if (!capable(CAP_SYS_ADMIN))
4040 snprintf(fname, sizeof(fname), "%s", buf);
4042 endline = strchr(fname, '\n');
4046 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4047 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4051 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4053 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4054 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4055 sglist = ipr_alloc_ucode_buffer(dnld_size);
4058 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4059 release_firmware(fw_entry);
4063 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4066 dev_err(&ioa_cfg->pdev->dev,
4067 "Microcode buffer copy to DMA buffer failed\n");
4071 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4073 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4078 ipr_free_ucode_buffer(sglist);
4079 release_firmware(fw_entry);
4083 static struct device_attribute ipr_update_fw_attr = {
4085 .name = "update_fw",
4088 .store = ipr_store_update_fw
4092 * ipr_show_fw_type - Show the adapter's firmware type.
4093 * @dev: class device struct
4097 * number of bytes printed to buffer
4099 static ssize_t ipr_show_fw_type(struct device *dev,
4100 struct device_attribute *attr, char *buf)
4102 struct Scsi_Host *shost = class_to_shost(dev);
4103 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4104 unsigned long lock_flags = 0;
4107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4108 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4113 static struct device_attribute ipr_ioa_fw_type_attr = {
4118 .show = ipr_show_fw_type
4121 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4122 struct bin_attribute *bin_attr, char *buf,
4123 loff_t off, size_t count)
4125 struct device *cdev = container_of(kobj, struct device, kobj);
4126 struct Scsi_Host *shost = class_to_shost(cdev);
4127 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4128 struct ipr_hostrcb *hostrcb;
4129 unsigned long lock_flags = 0;
4132 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4133 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4134 struct ipr_hostrcb, queue);
4136 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4139 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4140 sizeof(hostrcb->hcam));
4141 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4145 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4146 struct bin_attribute *bin_attr, char *buf,
4147 loff_t off, size_t count)
4149 struct device *cdev = container_of(kobj, struct device, kobj);
4150 struct Scsi_Host *shost = class_to_shost(cdev);
4151 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4152 struct ipr_hostrcb *hostrcb;
4153 unsigned long lock_flags = 0;
4155 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4156 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4157 struct ipr_hostrcb, queue);
4159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4163 /* Reclaim hostrcb before exit */
4164 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4165 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4169 static struct bin_attribute ipr_ioa_async_err_log = {
4171 .name = "async_err_log",
4172 .mode = S_IRUGO | S_IWUSR,
4175 .read = ipr_read_async_err_log,
4176 .write = ipr_next_async_err_log
4179 static struct device_attribute *ipr_ioa_attrs[] = {
4180 &ipr_fw_version_attr,
4181 &ipr_log_level_attr,
4182 &ipr_diagnostics_attr,
4183 &ipr_ioa_state_attr,
4184 &ipr_ioa_reset_attr,
4185 &ipr_update_fw_attr,
4186 &ipr_ioa_fw_type_attr,
4187 &ipr_iopoll_weight_attr,
4191 #ifdef CONFIG_SCSI_IPR_DUMP
4193 * ipr_read_dump - Dump the adapter
4194 * @filp: open sysfs file
4195 * @kobj: kobject struct
4196 * @bin_attr: bin_attribute struct
4199 * @count: buffer size
4202 * number of bytes printed to buffer
4204 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4205 struct bin_attribute *bin_attr,
4206 char *buf, loff_t off, size_t count)
4208 struct device *cdev = container_of(kobj, struct device, kobj);
4209 struct Scsi_Host *shost = class_to_shost(cdev);
4210 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4211 struct ipr_dump *dump;
4212 unsigned long lock_flags = 0;
4217 if (!capable(CAP_SYS_ADMIN))
4220 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4221 dump = ioa_cfg->dump;
4223 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4224 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4227 kref_get(&dump->kref);
4228 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4230 if (off > dump->driver_dump.hdr.len) {
4231 kref_put(&dump->kref, ipr_release_dump);
4235 if (off + count > dump->driver_dump.hdr.len) {
4236 count = dump->driver_dump.hdr.len - off;
4240 if (count && off < sizeof(dump->driver_dump)) {
4241 if (off + count > sizeof(dump->driver_dump))
4242 len = sizeof(dump->driver_dump) - off;
4245 src = (u8 *)&dump->driver_dump + off;
4246 memcpy(buf, src, len);
4252 off -= sizeof(dump->driver_dump);
4255 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4256 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4257 sizeof(struct ipr_sdt_entry));
4259 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4260 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4262 if (count && off < sdt_end) {
4263 if (off + count > sdt_end)
4264 len = sdt_end - off;
4267 src = (u8 *)&dump->ioa_dump + off;
4268 memcpy(buf, src, len);
4277 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4278 len = PAGE_ALIGN(off) - off;
4281 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4282 src += off & ~PAGE_MASK;
4283 memcpy(buf, src, len);
4289 kref_put(&dump->kref, ipr_release_dump);
4294 * ipr_alloc_dump - Prepare for adapter dump
4295 * @ioa_cfg: ioa config struct
4298 * 0 on success / other on failure
4300 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4302 struct ipr_dump *dump;
4304 unsigned long lock_flags = 0;
4306 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4309 ipr_err("Dump memory allocation failed\n");
4314 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4316 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4319 ipr_err("Dump memory allocation failed\n");
4324 dump->ioa_dump.ioa_data = ioa_data;
4326 kref_init(&dump->kref);
4327 dump->ioa_cfg = ioa_cfg;
4329 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4331 if (INACTIVE != ioa_cfg->sdt_state) {
4332 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4333 vfree(dump->ioa_dump.ioa_data);
4338 ioa_cfg->dump = dump;
4339 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4340 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4341 ioa_cfg->dump_taken = 1;
4342 schedule_work(&ioa_cfg->work_q);
4344 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4350 * ipr_free_dump - Free adapter dump memory
4351 * @ioa_cfg: ioa config struct
4354 * 0 on success / other on failure
4356 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4358 struct ipr_dump *dump;
4359 unsigned long lock_flags = 0;
4363 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4364 dump = ioa_cfg->dump;
4366 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4370 ioa_cfg->dump = NULL;
4371 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4373 kref_put(&dump->kref, ipr_release_dump);
4380 * ipr_write_dump - Setup dump state of adapter
4381 * @filp: open sysfs file
4382 * @kobj: kobject struct
4383 * @bin_attr: bin_attribute struct
4386 * @count: buffer size
4389 * number of bytes printed to buffer
4391 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4392 struct bin_attribute *bin_attr,
4393 char *buf, loff_t off, size_t count)
4395 struct device *cdev = container_of(kobj, struct device, kobj);
4396 struct Scsi_Host *shost = class_to_shost(cdev);
4397 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4400 if (!capable(CAP_SYS_ADMIN))
4404 rc = ipr_alloc_dump(ioa_cfg);
4405 else if (buf[0] == '0')
4406 rc = ipr_free_dump(ioa_cfg);
4416 static struct bin_attribute ipr_dump_attr = {
4419 .mode = S_IRUSR | S_IWUSR,
4422 .read = ipr_read_dump,
4423 .write = ipr_write_dump
/* Stub used when CONFIG_SCSI_IPR_DUMP is not enabled */
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4430 * ipr_change_queue_depth - Change the device's queue depth
4431 * @sdev: scsi device struct
4432 * @qdepth: depth to set
4433 * @reason: calling context
4438 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4440 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4441 struct ipr_resource_entry *res;
4442 unsigned long lock_flags = 0;
4444 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4445 res = (struct ipr_resource_entry *)sdev->hostdata;
4447 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4448 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4449 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4451 scsi_change_queue_depth(sdev, qdepth);
4452 return sdev->queue_depth;
4456 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4457 * @dev: device struct
4458 * @attr: device attribute structure
4462 * number of bytes printed to buffer
4464 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4466 struct scsi_device *sdev = to_scsi_device(dev);
4467 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4468 struct ipr_resource_entry *res;
4469 unsigned long lock_flags = 0;
4470 ssize_t len = -ENXIO;
4472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4473 res = (struct ipr_resource_entry *)sdev->hostdata;
4475 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4476 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4480 static struct device_attribute ipr_adapter_handle_attr = {
4482 .name = "adapter_handle",
4485 .show = ipr_show_adapter_handle
4489 * ipr_show_resource_path - Show the resource path or the resource address for
4491 * @dev: device struct
4492 * @attr: device attribute structure
4496 * number of bytes printed to buffer
4498 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4500 struct scsi_device *sdev = to_scsi_device(dev);
4501 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4502 struct ipr_resource_entry *res;
4503 unsigned long lock_flags = 0;
4504 ssize_t len = -ENXIO;
4505 char buffer[IPR_MAX_RES_PATH_LENGTH];
4507 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4508 res = (struct ipr_resource_entry *)sdev->hostdata;
4509 if (res && ioa_cfg->sis64)
4510 len = snprintf(buf, PAGE_SIZE, "%s\n",
4511 __ipr_format_res_path(res->res_path, buffer,
4514 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4515 res->bus, res->target, res->lun);
4517 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4521 static struct device_attribute ipr_resource_path_attr = {
4523 .name = "resource_path",
4526 .show = ipr_show_resource_path
4530 * ipr_show_device_id - Show the device_id for this device.
4531 * @dev: device struct
4532 * @attr: device attribute structure
4536 * number of bytes printed to buffer
4538 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4540 struct scsi_device *sdev = to_scsi_device(dev);
4541 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4542 struct ipr_resource_entry *res;
4543 unsigned long lock_flags = 0;
4544 ssize_t len = -ENXIO;
4546 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4547 res = (struct ipr_resource_entry *)sdev->hostdata;
4548 if (res && ioa_cfg->sis64)
4549 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4551 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4553 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4557 static struct device_attribute ipr_device_id_attr = {
4559 .name = "device_id",
4562 .show = ipr_show_device_id
4566 * ipr_show_resource_type - Show the resource type for this device.
4567 * @dev: device struct
4568 * @attr: device attribute structure
4572 * number of bytes printed to buffer
4574 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4576 struct scsi_device *sdev = to_scsi_device(dev);
4577 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4578 struct ipr_resource_entry *res;
4579 unsigned long lock_flags = 0;
4580 ssize_t len = -ENXIO;
4582 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4583 res = (struct ipr_resource_entry *)sdev->hostdata;
4586 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4588 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4592 static struct device_attribute ipr_resource_type_attr = {
4594 .name = "resource_type",
4597 .show = ipr_show_resource_type
4601 * ipr_show_raw_mode - Show the adapter's raw mode
4602 * @dev: class device struct
4606 * number of bytes printed to buffer
4608 static ssize_t ipr_show_raw_mode(struct device *dev,
4609 struct device_attribute *attr, char *buf)
4611 struct scsi_device *sdev = to_scsi_device(dev);
4612 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4613 struct ipr_resource_entry *res;
4614 unsigned long lock_flags = 0;
4617 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4618 res = (struct ipr_resource_entry *)sdev->hostdata;
4620 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4628 * ipr_store_raw_mode - Change the adapter's raw mode
4629 * @dev: class device struct
4633 * number of bytes printed to buffer
4635 static ssize_t ipr_store_raw_mode(struct device *dev,
4636 struct device_attribute *attr,
4637 const char *buf, size_t count)
4639 struct scsi_device *sdev = to_scsi_device(dev);
4640 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4641 struct ipr_resource_entry *res;
4642 unsigned long lock_flags = 0;
4645 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4646 res = (struct ipr_resource_entry *)sdev->hostdata;
4648 if (ipr_is_af_dasd_device(res)) {
4649 res->raw_mode = simple_strtoul(buf, NULL, 10);
4652 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4653 res->raw_mode ? "enabled" : "disabled");
4658 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4662 static struct device_attribute ipr_raw_mode_attr = {
4665 .mode = S_IRUGO | S_IWUSR,
4667 .show = ipr_show_raw_mode,
4668 .store = ipr_store_raw_mode
4671 static struct device_attribute *ipr_dev_attrs[] = {
4672 &ipr_adapter_handle_attr,
4673 &ipr_resource_path_attr,
4674 &ipr_device_id_attr,
4675 &ipr_resource_type_attr,
4681 * ipr_biosparam - Return the HSC mapping
4682 * @sdev: scsi device struct
4683 * @block_device: block device pointer
4684 * @capacity: capacity of the device
4685 * @parm: Array containing returned HSC values.
4687 * This function generates the HSC parms that fdisk uses.
4688 * We want to make sure we return something that places partitions
4689 * on 4k boundaries for best performance with the IOA.
4694 static int ipr_biosparam(struct scsi_device *sdev,
4695 struct block_device *block_device,
4696 sector_t capacity, int *parm)
4704 cylinders = capacity;
4705 sector_div(cylinders, (128 * 32));
4710 parm[2] = cylinders;
4716 * ipr_find_starget - Find target based on bus/target.
4717 * @starget: scsi target struct
4720 * resource entry pointer if found / NULL if not found
4722 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4724 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4725 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4726 struct ipr_resource_entry *res;
4728 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4729 if ((res->bus == starget->channel) &&
4730 (res->target == starget->id)) {
/* Forward declaration; the ata_port_info (ops/flags) is initialized later in this file. */
static struct ata_port_info sata_port_info;
4741 * ipr_target_alloc - Prepare for commands to a SCSI target
4742 * @starget: scsi target struct
4744 * If the device is a SATA device, this function allocates an
4745 * ATA port with libata, else it does nothing.
4748 * 0 on success / non-0 on failure
4750 static int ipr_target_alloc(struct scsi_target *starget)
4752 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4753 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4754 struct ipr_sata_port *sata_port;
4755 struct ata_port *ap;
4756 struct ipr_resource_entry *res;
4757 unsigned long lock_flags;
4759 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4760 res = ipr_find_starget(starget);
4761 starget->hostdata = NULL;
4763 if (res && ipr_is_gata(res)) {
4764 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4765 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4769 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4771 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4772 sata_port->ioa_cfg = ioa_cfg;
4774 sata_port->res = res;
4776 res->sata_port = sata_port;
4777 ap->private_data = sata_port;
4778 starget->hostdata = sata_port;
4784 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4790 * ipr_target_destroy - Destroy a SCSI target
4791 * @starget: scsi target struct
4793 * If the device was a SATA device, this function frees the libata
4794 * ATA port, else it does nothing.
4797 static void ipr_target_destroy(struct scsi_target *starget)
4799 struct ipr_sata_port *sata_port = starget->hostdata;
4800 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4801 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4803 if (ioa_cfg->sis64) {
4804 if (!ipr_find_starget(starget)) {
4805 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4806 clear_bit(starget->id, ioa_cfg->array_ids);
4807 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4808 clear_bit(starget->id, ioa_cfg->vset_ids);
4809 else if (starget->channel == 0)
4810 clear_bit(starget->id, ioa_cfg->target_ids);
4815 starget->hostdata = NULL;
4816 ata_sas_port_destroy(sata_port->ap);
4822 * ipr_find_sdev - Find device based on bus/target/lun.
4823 * @sdev: scsi device struct
4826 * resource entry pointer if found / NULL if not found
4828 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4830 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4831 struct ipr_resource_entry *res;
4833 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4834 if ((res->bus == sdev->channel) &&
4835 (res->target == sdev->id) &&
4836 (res->lun == sdev->lun))
4844 * ipr_slave_destroy - Unconfigure a SCSI device
4845 * @sdev: scsi device struct
4850 static void ipr_slave_destroy(struct scsi_device *sdev)
4852 struct ipr_resource_entry *res;
4853 struct ipr_ioa_cfg *ioa_cfg;
4854 unsigned long lock_flags = 0;
4856 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4858 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4859 res = (struct ipr_resource_entry *) sdev->hostdata;
4862 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4863 sdev->hostdata = NULL;
4865 res->sata_port = NULL;
4867 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4871 * ipr_slave_configure - Configure a SCSI device
4872 * @sdev: scsi device struct
4874 * This function configures the specified scsi device.
4879 static int ipr_slave_configure(struct scsi_device *sdev)
4881 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4882 struct ipr_resource_entry *res;
4883 struct ata_port *ap = NULL;
4884 unsigned long lock_flags = 0;
4885 char buffer[IPR_MAX_RES_PATH_LENGTH];
4887 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4888 res = sdev->hostdata;
4890 if (ipr_is_af_dasd_device(res))
4891 sdev->type = TYPE_RAID;
4892 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4893 sdev->scsi_level = 4;
4894 sdev->no_uld_attach = 1;
4896 if (ipr_is_vset_device(res)) {
4897 sdev->scsi_level = SCSI_SPC_3;
4898 blk_queue_rq_timeout(sdev->request_queue,
4899 IPR_VSET_RW_TIMEOUT);
4900 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4902 if (ipr_is_gata(res) && res->sata_port)
4903 ap = res->sata_port->ap;
4904 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4907 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4908 ata_sas_slave_configure(sdev, ap);
4912 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4913 ipr_format_res_path(ioa_cfg,
4914 res->res_path, buffer, sizeof(buffer)));
4917 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4922 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4923 * @sdev: scsi device struct
4925 * This function initializes an ATA port so that future commands
4926 * sent through queuecommand will work.
4931 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4933 struct ipr_sata_port *sata_port = NULL;
4937 if (sdev->sdev_target)
4938 sata_port = sdev->sdev_target->hostdata;
4940 rc = ata_sas_port_init(sata_port->ap);
4942 rc = ata_sas_sync_probe(sata_port->ap);
4946 ipr_slave_destroy(sdev);
4953 * ipr_slave_alloc - Prepare for commands to a device.
4954 * @sdev: scsi device struct
4956 * This function saves a pointer to the resource entry
4957 * in the scsi device struct if the device exists. We
4958 * can then use this pointer in ipr_queuecommand when
4959 * handling new commands.
4962 * 0 on success / -ENXIO if device does not exist
4964 static int ipr_slave_alloc(struct scsi_device *sdev)
4966 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4967 struct ipr_resource_entry *res;
4968 unsigned long lock_flags;
4971 sdev->hostdata = NULL;
4973 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4975 res = ipr_find_sdev(sdev);
4980 sdev->hostdata = res;
4981 if (!ipr_is_naca_model(res))
4982 res->needs_sync_complete = 1;
4984 if (ipr_is_gata(res)) {
4985 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4986 return ipr_ata_slave_alloc(sdev);
4990 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4996 * ipr_match_lun - Match function for specified LUN
4997 * @ipr_cmd: ipr command struct
4998 * @device: device to match (sdev)
5001 * 1 if command matches sdev / 0 if command does not match sdev
5003 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5005 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5011 * ipr_wait_for_ops - Wait for matching commands to complete
5012 * @ipr_cmd: ipr command struct
5013 * @device: device to match (sdev)
5014 * @match: match function to use
5019 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5020 int (*match)(struct ipr_cmnd *, void *))
5022 struct ipr_cmnd *ipr_cmd;
5024 unsigned long flags;
5025 struct ipr_hrr_queue *hrrq;
5026 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5027 DECLARE_COMPLETION_ONSTACK(comp);
5033 for_each_hrrq(hrrq, ioa_cfg) {
5034 spin_lock_irqsave(hrrq->lock, flags);
5035 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5036 if (match(ipr_cmd, device)) {
5037 ipr_cmd->eh_comp = ∁
5041 spin_unlock_irqrestore(hrrq->lock, flags);
5045 timeout = wait_for_completion_timeout(&comp, timeout);
5050 for_each_hrrq(hrrq, ioa_cfg) {
5051 spin_lock_irqsave(hrrq->lock, flags);
5052 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5053 if (match(ipr_cmd, device)) {
5054 ipr_cmd->eh_comp = NULL;
5058 spin_unlock_irqrestore(hrrq->lock, flags);
5062 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5064 return wait ? FAILED : SUCCESS;
5073 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5075 struct ipr_ioa_cfg *ioa_cfg;
5076 unsigned long lock_flags = 0;
5080 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5081 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5083 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5084 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5085 dev_err(&ioa_cfg->pdev->dev,
5086 "Adapter being reset as a result of error recovery.\n");
5088 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5089 ioa_cfg->sdt_state = GET_DUMP;
5092 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5093 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5094 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5096 /* If we got hit with a host reset while we were already resetting
5097 the adapter for some reason, and the reset failed. */
5098 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5103 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5109 * ipr_device_reset - Reset the device
5110 * @ioa_cfg: ioa config struct
5111 * @res: resource entry struct
5113 * This function issues a device reset to the affected device.
5114 * If the device is a SCSI device, a LUN reset will be sent
5115 * to the device first. If that does not work, a target reset
5116 * will be sent. If the device is a SATA device, a PHY reset will
5120 * 0 on success / non-zero on failure
5122 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5123 struct ipr_resource_entry *res)
5125 struct ipr_cmnd *ipr_cmd;
5126 struct ipr_ioarcb *ioarcb;
5127 struct ipr_cmd_pkt *cmd_pkt;
5128 struct ipr_ioarcb_ata_regs *regs;
5132 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5133 ioarcb = &ipr_cmd->ioarcb;
5134 cmd_pkt = &ioarcb->cmd_pkt;
5136 if (ipr_cmd->ioa_cfg->sis64) {
5137 regs = &ipr_cmd->i.ata_ioadl.regs;
5138 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5140 regs = &ioarcb->u.add_data.u.regs;
5142 ioarcb->res_handle = res->res_handle;
5143 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5144 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5145 if (ipr_is_gata(res)) {
5146 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5147 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5148 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5151 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5152 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5153 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5154 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5155 if (ipr_cmd->ioa_cfg->sis64)
5156 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5157 sizeof(struct ipr_ioasa_gata));
5159 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5160 sizeof(struct ipr_ioasa_gata));
5164 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5168 * ipr_sata_reset - Reset the SATA port
5169 * @link: SATA link to reset
5170 * @classes: class of the attached device
5172 * This function issues a SATA phy reset to the affected ATA link.
5175 * 0 on success / non-zero on failure
5177 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5178 unsigned long deadline)
5180 struct ipr_sata_port *sata_port = link->ap->private_data;
5181 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5182 struct ipr_resource_entry *res;
5183 unsigned long lock_flags = 0;
5187 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5188 while (ioa_cfg->in_reset_reload) {
5189 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5190 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5191 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5194 res = sata_port->res;
5196 rc = ipr_device_reset(ioa_cfg, res);
5197 *classes = res->ata_class;
5200 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5206 * ipr_eh_dev_reset - Reset the device
5207 * @scsi_cmd: scsi command struct
5209 * This function issues a device reset to the affected device.
5210 * A LUN reset will be sent to the device first. If that does
5211 * not work, a target reset will be sent.
5216 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5218 struct ipr_cmnd *ipr_cmd;
5219 struct ipr_ioa_cfg *ioa_cfg;
5220 struct ipr_resource_entry *res;
5221 struct ata_port *ap;
5223 struct ipr_hrr_queue *hrrq;
5226 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5227 res = scsi_cmd->device->hostdata;
5233 * If we are currently going through reset/reload, return failed. This will force the
5234 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5237 if (ioa_cfg->in_reset_reload)
5239 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5242 for_each_hrrq(hrrq, ioa_cfg) {
5243 spin_lock(&hrrq->_lock);
5244 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5245 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5246 if (ipr_cmd->scsi_cmd)
5247 ipr_cmd->done = ipr_scsi_eh_done;
5249 ipr_cmd->done = ipr_sata_eh_done;
5251 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5252 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5253 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5257 spin_unlock(&hrrq->_lock);
5259 res->resetting_device = 1;
5260 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5262 if (ipr_is_gata(res) && res->sata_port) {
5263 ap = res->sata_port->ap;
5264 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5265 ata_std_error_handler(ap);
5266 spin_lock_irq(scsi_cmd->device->host->host_lock);
5268 for_each_hrrq(hrrq, ioa_cfg) {
5269 spin_lock(&hrrq->_lock);
5270 list_for_each_entry(ipr_cmd,
5271 &hrrq->hrrq_pending_q, queue) {
5272 if (ipr_cmd->ioarcb.res_handle ==
5278 spin_unlock(&hrrq->_lock);
5281 rc = ipr_device_reset(ioa_cfg, res);
5282 res->resetting_device = 0;
5283 res->reset_occurred = 1;
5286 return rc ? FAILED : SUCCESS;
5289 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5292 struct ipr_ioa_cfg *ioa_cfg;
5294 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5296 spin_lock_irq(cmd->device->host->host_lock);
5297 rc = __ipr_eh_dev_reset(cmd);
5298 spin_unlock_irq(cmd->device->host->host_lock);
5301 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5307 * ipr_bus_reset_done - Op done function for bus reset.
5308 * @ipr_cmd: ipr command struct
5310 * This function is the op done function for a bus reset
5315 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5317 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5318 struct ipr_resource_entry *res;
5321 if (!ioa_cfg->sis64)
5322 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5323 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5324 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5330 * If abort has not completed, indicate the reset has, else call the
5331 * abort's done function to wake the sleeping eh thread
5333 if (ipr_cmd->sibling->sibling)
5334 ipr_cmd->sibling->sibling = NULL;
5336 ipr_cmd->sibling->done(ipr_cmd->sibling);
5338 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5343 * ipr_abort_timeout - An abort task has timed out
5344 * @ipr_cmd: ipr command struct
5346 * This function handles when an abort task times out. If this
5347 * happens we issue a bus reset since we have resources tied
5348 * up that must be freed before returning to the midlayer.
5353 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5355 struct ipr_cmnd *reset_cmd;
5356 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5357 struct ipr_cmd_pkt *cmd_pkt;
5358 unsigned long lock_flags = 0;
5361 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5362 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5363 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5367 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5368 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5369 ipr_cmd->sibling = reset_cmd;
5370 reset_cmd->sibling = ipr_cmd;
5371 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5372 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5373 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5374 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5375 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5377 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5378 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5383 * ipr_cancel_op - Cancel specified op
5384 * @scsi_cmd: scsi command struct
5386 * This function cancels specified op.
5391 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5393 struct ipr_cmnd *ipr_cmd;
5394 struct ipr_ioa_cfg *ioa_cfg;
5395 struct ipr_resource_entry *res;
5396 struct ipr_cmd_pkt *cmd_pkt;
5399 struct ipr_hrr_queue *hrrq;
5402 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5403 res = scsi_cmd->device->hostdata;
5405 /* If we are currently going through reset/reload, return failed.
5406 * This will force the mid-layer to call ipr_eh_host_reset,
5407 * which will then go to sleep and wait for the reset to complete
5409 if (ioa_cfg->in_reset_reload ||
5410 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5416 * If we are aborting a timed out op, chances are that the timeout was caused
5417 * by a still not detected EEH error. In such cases, reading a register will
5418 * trigger the EEH recovery infrastructure.
5420 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5422 if (!ipr_is_gscsi(res))
5425 for_each_hrrq(hrrq, ioa_cfg) {
5426 spin_lock(&hrrq->_lock);
5427 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5428 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5429 ipr_cmd->done = ipr_scsi_eh_done;
5434 spin_unlock(&hrrq->_lock);
5440 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5441 ipr_cmd->ioarcb.res_handle = res->res_handle;
5442 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5443 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5444 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5445 ipr_cmd->u.sdev = scsi_cmd->device;
5447 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5449 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5450 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5453 * If the abort task timed out and we sent a bus reset, we will get
5454 * one the following responses to the abort
5456 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5461 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5462 if (!ipr_is_naca_model(res))
5463 res->needs_sync_complete = 1;
5466 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5470 * ipr_eh_abort - Abort a single op
5471 * @scsi_cmd: scsi command struct
5474 * 0 if scan in progress / 1 if scan is complete
5476 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5478 unsigned long lock_flags;
5479 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5482 spin_lock_irqsave(shost->host_lock, lock_flags);
5483 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5485 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5487 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5492 * ipr_eh_host_reset - Reset the host adapter
5493 * @scsi_cmd: scsi command struct
5498 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5500 unsigned long flags;
5502 struct ipr_ioa_cfg *ioa_cfg;
5506 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5508 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5509 rc = ipr_cancel_op(scsi_cmd);
5510 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5513 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5519 * ipr_handle_other_interrupt - Handle "other" interrupts
5520 * @ioa_cfg: ioa config struct
5521 * @int_reg: interrupt register
5524 * IRQ_NONE / IRQ_HANDLED
5526 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5529 irqreturn_t rc = IRQ_HANDLED;
5532 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5533 int_reg &= ~int_mask_reg;
5535 /* If an interrupt on the adapter did not occur, ignore it.
5536 * Or in the case of SIS 64, check for a stage change interrupt.
5538 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5539 if (ioa_cfg->sis64) {
5540 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5541 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5542 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5544 /* clear stage change */
5545 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5546 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5547 list_del(&ioa_cfg->reset_cmd->queue);
5548 del_timer(&ioa_cfg->reset_cmd->timer);
5549 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5557 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5558 /* Mask the interrupt */
5559 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5560 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5562 list_del(&ioa_cfg->reset_cmd->queue);
5563 del_timer(&ioa_cfg->reset_cmd->timer);
5564 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5565 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5566 if (ioa_cfg->clear_isr) {
5567 if (ipr_debug && printk_ratelimit())
5568 dev_err(&ioa_cfg->pdev->dev,
5569 "Spurious interrupt detected. 0x%08X\n", int_reg);
5570 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5571 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5575 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5576 ioa_cfg->ioa_unit_checked = 1;
5577 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5578 dev_err(&ioa_cfg->pdev->dev,
5579 "No Host RRQ. 0x%08X\n", int_reg);
5581 dev_err(&ioa_cfg->pdev->dev,
5582 "Permanent IOA failure. 0x%08X\n", int_reg);
5584 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5585 ioa_cfg->sdt_state = GET_DUMP;
5587 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5588 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5595 * ipr_isr_eh - Interrupt service routine error handler
5596 * @ioa_cfg: ioa config struct
5597 * @msg: message to log
5602 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5604 ioa_cfg->errors_logged++;
5605 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5607 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5608 ioa_cfg->sdt_state = GET_DUMP;
5610 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5613 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5614 struct list_head *doneq)
5618 struct ipr_cmnd *ipr_cmd;
5619 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5622 /* If interrupts are disabled, ignore the interrupt */
5623 if (!hrr_queue->allow_interrupts)
5626 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5627 hrr_queue->toggle_bit) {
5629 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5630 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5631 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5633 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5634 cmd_index < hrr_queue->min_cmd_id)) {
5636 "Invalid response handle from IOA: ",
5641 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5642 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5644 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5646 list_move_tail(&ipr_cmd->queue, doneq);
5648 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5649 hrr_queue->hrrq_curr++;
5651 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5652 hrr_queue->toggle_bit ^= 1u;
5655 if (budget > 0 && num_hrrq >= budget)
5662 static int ipr_iopoll(struct irq_poll *iop, int budget)
5664 struct ipr_ioa_cfg *ioa_cfg;
5665 struct ipr_hrr_queue *hrrq;
5666 struct ipr_cmnd *ipr_cmd, *temp;
5667 unsigned long hrrq_flags;
5671 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5672 ioa_cfg = hrrq->ioa_cfg;
5674 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5675 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5677 if (completed_ops < budget)
5678 irq_poll_complete(iop);
5679 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5681 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5682 list_del(&ipr_cmd->queue);
5683 del_timer(&ipr_cmd->timer);
5684 ipr_cmd->fast_done(ipr_cmd);
5687 return completed_ops;
5691 * ipr_isr - Interrupt service routine
5693 * @devp: pointer to ioa config struct
5696 * IRQ_NONE / IRQ_HANDLED
5698 static irqreturn_t ipr_isr(int irq, void *devp)
5700 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5701 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5702 unsigned long hrrq_flags = 0;
5706 struct ipr_cmnd *ipr_cmd, *temp;
5707 irqreturn_t rc = IRQ_NONE;
5710 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5711 /* If interrupts are disabled, ignore the interrupt */
5712 if (!hrrq->allow_interrupts) {
5713 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5718 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5721 if (!ioa_cfg->clear_isr)
5724 /* Clear the PCI interrupt */
5727 writel(IPR_PCII_HRRQ_UPDATED,
5728 ioa_cfg->regs.clr_interrupt_reg32);
5729 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5730 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5731 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5733 } else if (rc == IRQ_NONE && irq_none == 0) {
5734 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5736 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5737 int_reg & IPR_PCII_HRRQ_UPDATED) {
5739 "Error clearing HRRQ: ", num_hrrq);
5746 if (unlikely(rc == IRQ_NONE))
5747 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5749 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5750 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5751 list_del(&ipr_cmd->queue);
5752 del_timer(&ipr_cmd->timer);
5753 ipr_cmd->fast_done(ipr_cmd);
5759 * ipr_isr_mhrrq - Interrupt service routine
5761 * @devp: pointer to ioa config struct
5764 * IRQ_NONE / IRQ_HANDLED
5766 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5768 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5769 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5770 unsigned long hrrq_flags = 0;
5771 struct ipr_cmnd *ipr_cmd, *temp;
5772 irqreturn_t rc = IRQ_NONE;
5775 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5777 /* If interrupts are disabled, ignore the interrupt */
5778 if (!hrrq->allow_interrupts) {
5779 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5783 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5784 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5786 irq_poll_sched(&hrrq->iopoll);
5787 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5791 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5794 if (ipr_process_hrrq(hrrq, -1, &doneq))
5798 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5800 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5801 list_del(&ipr_cmd->queue);
5802 del_timer(&ipr_cmd->timer);
5803 ipr_cmd->fast_done(ipr_cmd);
5809 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5810 * @ioa_cfg: ioa config struct
5811 * @ipr_cmd: ipr command struct
5814 * 0 on success / -1 on failure
5816 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5817 struct ipr_cmnd *ipr_cmd)
5820 struct scatterlist *sg;
5822 u32 ioadl_flags = 0;
5823 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5824 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5825 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5827 length = scsi_bufflen(scsi_cmd);
5831 nseg = scsi_dma_map(scsi_cmd);
5833 if (printk_ratelimit())
5834 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5838 ipr_cmd->dma_use_sg = nseg;
5840 ioarcb->data_transfer_length = cpu_to_be32(length);
5842 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5844 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5845 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5846 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5847 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5848 ioadl_flags = IPR_IOADL_FLAGS_READ;
5850 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5851 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5852 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5853 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5856 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
/* NOTE(review): extraction dropped interleaved source lines (error-return
 * paths, braces) throughout this function; code kept byte-identical. */
5861 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5862 * @ioa_cfg: ioa config struct
5863 * @ipr_cmd: ipr command struct
5866 * 0 on success / -1 on failure
5868 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5869 struct ipr_cmnd *ipr_cmd)
5872 struct scatterlist *sg;
5874 u32 ioadl_flags = 0;
5875 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5876 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5877 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
/* Map the midlayer buffer for DMA; nseg is the mapped SG element count. */
5879 length = scsi_bufflen(scsi_cmd);
5883 nseg = scsi_dma_map(scsi_cmd);
5885 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5889 ipr_cmd->dma_use_sg = nseg;
/* Direction decides which transfer-length/ioadl-len fields of the IOARCB
 * are filled and whether the WRITE flag is set in the command packet. */
5891 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5892 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5893 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5894 ioarcb->data_transfer_length = cpu_to_be32(length);
5896 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5897 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5898 ioadl_flags = IPR_IOADL_FLAGS_READ;
5899 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5900 ioarcb->read_ioadl_len =
5901 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* Small lists fit inline in the IOARCB's add_data area, so the adapter
 * need not fetch the descriptors from a separate DMA region. */
5904 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5905 ioadl = ioarcb->u.add_data.u.ioadl;
5906 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5907 offsetof(struct ipr_ioarcb, u.add_data));
5908 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
/* One 32-bit descriptor per SG element; last one is tagged LAST. */
5911 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5912 ioadl[i].flags_and_data_len =
5913 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5914 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5917 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5922 * ipr_erp_done - Process completion of ERP for a device
5923 * @ipr_cmd: ipr command struct
5925 * This function copies the sense buffer into the scsi_cmd
5926 * struct and pushes the scsi_done function.
5931 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5933 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5934 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5935 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Non-zero sense key here means the Request Sense itself failed. */
5937 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5938 scsi_cmd->result |= (DID_ERROR << 16);
5939 scmd_printk(KERN_ERR, scsi_cmd,
5940 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5942 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5943 SCSI_SENSE_BUFFERSIZE);
5947 if (!ipr_is_naca_model(res))
5948 res->needs_sync_complete = 1;
/* Unmap, complete to the midlayer, wake any EH waiter, then recycle
 * the command block onto the HRRQ free queue. */
5951 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5952 scsi_cmd->scsi_done(scsi_cmd);
5953 if (ipr_cmd->eh_comp)
5954 complete(ipr_cmd->eh_comp);
5955 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5959 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5960 * @ipr_cmd: ipr command struct
5965 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5967 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5968 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5969 dma_addr_t dma_addr = ipr_cmd->dma_addr;
/* Clear the command packet and all length/status fields so the same
 * command block can be reissued for an ERP op. */
5971 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5972 ioarcb->data_transfer_length = 0;
5973 ioarcb->read_data_transfer_length = 0;
5974 ioarcb->ioadl_len = 0;
5975 ioarcb->read_ioadl_len = 0;
5976 ioasa->hdr.ioasc = 0;
5977 ioasa->hdr.residual_data_len = 0;
/* Point the IOADL address back at the command block's own embedded
 * descriptor area: 64-bit form for SIS64, 32-bit form otherwise. */
5979 if (ipr_cmd->ioa_cfg->sis64)
5980 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5981 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5983 ioarcb->write_ioadl_addr =
5984 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5985 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5990 * ipr_erp_request_sense - Send request sense to a device
5991 * @ipr_cmd: ipr command struct
5993 * This function sends a request sense to a device as a result
5994 * of a check condition.
5999 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6001 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6002 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* If the previous ERP step already produced a sense key, finish ERP
 * instead of issuing another Request Sense. */
6004 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6005 ipr_erp_done(ipr_cmd);
6009 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Build a REQUEST SENSE CDB; cdb[4] is the allocation length. */
6011 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6012 cmd_pkt->cdb[0] = REQUEST_SENSE;
6013 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6014 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6015 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6016 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6018 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6019 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
/* Driver-side timeout is twice the adapter timeout set above. */
6021 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6022 IPR_REQUEST_SENSE_TIMEOUT * 2);
6026 * ipr_erp_cancel_all - Send cancel all to a device
6027 * @ipr_cmd: ipr command struct
6029 * This function sends a cancel all to a device to clear the
6030 * queue. If we are running TCQ on the device, QERR is set to 1,
6031 * which means all outstanding ops have been dropped on the floor.
6032 * Cancel all will return them to us.
6037 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6039 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6040 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6041 struct ipr_cmd_pkt *cmd_pkt;
6045 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Without simple tags there is nothing queued to cancel; go straight
 * to the Request Sense step of ERP. */
6047 if (!scsi_cmd->device->simple_tags) {
6048 ipr_erp_request_sense(ipr_cmd);
6052 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6053 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6054 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
/* Chain to Request Sense when the cancel completes. */
6056 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6057 IPR_CANCEL_ALL_TIMEOUT);
6061 * ipr_dump_ioasa - Dump contents of IOASA
6062 * @ioa_cfg: ioa config struct
6063 * @ipr_cmd: ipr command struct
6064 * @res: resource entry struct
6066 * This function is invoked by the interrupt handler when ops
6067 * fail. It will log the IOASA if appropriate. Only called
6073 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6074 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6078 u32 ioasc, fd_ioasc;
6079 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6080 __be32 *ioasa_data = (__be32 *)ioasa;
6083 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6084 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
/* Below the default log level nothing is logged at all. */
6089 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
/* For bus resets prefer the failing-device IOASC when present. */
6092 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6093 error_index = ipr_get_error(fd_ioasc);
6095 error_index = ipr_get_error(ioasc);
6097 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6098 /* Don't log an error if the IOA already logged one */
6099 if (ioasa->hdr.ilid != 0)
6102 if (!ipr_is_gscsi(res))
6105 if (ipr_error_table[error_index].log_ioasa == 0)
6109 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
/* Clamp the dump length to the IOASA struct size for the SIS level. */
6111 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6112 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6113 data_len = sizeof(struct ipr_ioasa64);
6114 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6115 data_len = sizeof(struct ipr_ioasa);
6117 ipr_err("IOASA Dump:\n");
/* Hex-dump the IOASA four 32-bit words per line. */
6119 for (i = 0; i < data_len / 4; i += 4) {
6120 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6121 be32_to_cpu(ioasa_data[i]),
6122 be32_to_cpu(ioasa_data[i+1]),
6123 be32_to_cpu(ioasa_data[i+2]),
6124 be32_to_cpu(ioasa_data[i+3]));
6129 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6131 * @sense_buf: sense data buffer
6136 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6139 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6140 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6141 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6142 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6144 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* Driver-generated IOASCs carry no device sense to synthesize. */
6146 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6149 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/* Vset device with a >32-bit failing LBA: build descriptor-format
 * sense (0x72) so the full 64-bit LBA fits in the information field. */
6151 if (ipr_is_vset_device(res) &&
6152 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6153 ioasa->u.vset.failing_lba_hi != 0) {
6154 sense_buf[0] = 0x72;
6155 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6156 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6157 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6161 sense_buf[9] = 0x0A;
6162 sense_buf[10] = 0x80;
/* High 32 bits of the failing LBA, big-endian into bytes 12-15. */
6164 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6166 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6167 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6168 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6169 sense_buf[15] = failing_lba & 0x000000ff;
/* Low 32 bits of the failing LBA into bytes 16-19. */
6171 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6173 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6174 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6175 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6176 sense_buf[19] = failing_lba & 0x000000ff;
/* Otherwise build fixed-format sense (0x70). */
6178 sense_buf[0] = 0x70;
6179 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6180 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6181 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6183 /* Illegal request */
6184 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6185 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6186 sense_buf[7] = 10; /* additional length */
6188 /* IOARCB was in error */
6189 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6190 sense_buf[15] = 0xC0;
6191 else /* Parameter data was invalid */
6192 sense_buf[15] = 0x80;
/* Field pointer from the IOASC-specific word, split into two bytes. */
6195 ((IPR_FIELD_POINTER_MASK &
6196 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6198 (IPR_FIELD_POINTER_MASK &
6199 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6201 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6202 if (ipr_is_vset_device(res))
6203 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6205 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6207 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6208 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6209 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6210 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6211 sense_buf[6] = failing_lba & 0x000000ff;
6214 sense_buf[7] = 6; /* additional length */
6220 * ipr_get_autosense - Copy autosense data to sense buffer
6221 * @ipr_cmd: ipr command struct
6223 * This function copies the autosense buffer to the buffer
6224 * in the scsi_cmd, if there is autosense available.
6227 * 1 if autosense was available / 0 if not
6229 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6231 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6232 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
/* Bail out when the adapter did not flag valid autosense data. */
6234 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
/* Copy from the SIS64 or legacy IOASA layout as appropriate, capped
 * at the midlayer sense buffer size. */
6237 if (ipr_cmd->ioa_cfg->sis64)
6238 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6239 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6240 SCSI_SENSE_BUFFERSIZE));
6242 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6243 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6244 SCSI_SENSE_BUFFERSIZE));
6249 * ipr_erp_start - Process an error response for a SCSI op
6250 * @ioa_cfg: ioa config struct
6251 * @ipr_cmd: ipr command struct
6253 * This function determines whether or not to initiate ERP
6254 * on the affected device.
6259 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6260 struct ipr_cmnd *ipr_cmd)
6262 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6263 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6264 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6265 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6268 ipr_scsi_eh_done(ipr_cmd);
/* Non-generic-SCSI devices get synthesized sense data (except for
 * pass-through device bus status, handled in the switch below). */
6272 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6273 ipr_gen_sense(ipr_cmd);
6275 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
/* Map the masked IOASC onto a midlayer result / ERP action. */
6277 switch (masked_ioasc) {
6278 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6279 if (ipr_is_naca_model(res))
6280 scsi_cmd->result |= (DID_ABORT << 16);
6282 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6284 case IPR_IOASC_IR_RESOURCE_HANDLE:
6285 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6286 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6288 case IPR_IOASC_HW_SEL_TIMEOUT:
6289 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6290 if (!ipr_is_naca_model(res))
6291 res->needs_sync_complete = 1;
6293 case IPR_IOASC_SYNC_REQUIRED:
6295 res->needs_sync_complete = 1;
6296 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6298 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6299 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6300 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6302 case IPR_IOASC_BUS_WAS_RESET:
6303 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6305 * Report the bus reset and ask for a retry. The device
6306 * will give CC/UA the next command.
6308 if (!res->resetting_device)
6309 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6310 scsi_cmd->result |= (DID_ERROR << 16);
6311 if (!ipr_is_naca_model(res))
6312 res->needs_sync_complete = 1;
6314 case IPR_IOASC_HW_DEV_BUS_STATUS:
6315 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
/* Check condition without autosense: kick off ERP (cancel all ->
 * request sense) on non-NACA devices. */
6316 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6317 if (!ipr_get_autosense(ipr_cmd)) {
6318 if (!ipr_is_naca_model(res)) {
6319 ipr_erp_cancel_all(ipr_cmd);
6324 if (!ipr_is_naca_model(res))
6325 res->needs_sync_complete = 1;
6327 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6329 case IPR_IOASC_IR_NON_OPTIMIZED:
6330 if (res->raw_mode) {
6332 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6334 scsi_cmd->result |= (DID_ERROR << 16);
6337 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6338 scsi_cmd->result |= (DID_ERROR << 16);
6339 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6340 res->needs_sync_complete = 1;
/* Complete the command and return the block to the free queue. */
6344 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6345 scsi_cmd->scsi_done(scsi_cmd);
6346 if (ipr_cmd->eh_comp)
6347 complete(ipr_cmd->eh_comp);
6348 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6352 * ipr_scsi_done - mid-layer done function
6353 * @ipr_cmd: ipr command struct
6355 * This function is invoked by the interrupt handler for
6356 * ops generated by the SCSI mid-layer
6361 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6363 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6364 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6365 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6366 unsigned long lock_flags;
6368 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
/* Fast path: no sense key means success -- complete under the HRRQ
 * lock only. The error path takes host_lock + hrrq->_lock for ERP. */
6370 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6371 scsi_dma_unmap(scsi_cmd);
6373 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6374 scsi_cmd->scsi_done(scsi_cmd);
6375 if (ipr_cmd->eh_comp)
6376 complete(ipr_cmd->eh_comp);
6377 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6378 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6380 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6381 spin_lock(&ipr_cmd->hrrq->_lock);
6382 ipr_erp_start(ioa_cfg, ipr_cmd);
6383 spin_unlock(&ipr_cmd->hrrq->_lock);
6384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6389 * ipr_queuecommand - Queue a mid-layer request
6390 * @shost: scsi host struct
6391 * @scsi_cmd: scsi command struct
6393 * This function queues a request generated by the mid-layer.
6397 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6398 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6400 static int ipr_queuecommand(struct Scsi_Host *shost,
6401 struct scsi_cmnd *scsi_cmd)
6403 struct ipr_ioa_cfg *ioa_cfg;
6404 struct ipr_resource_entry *res;
6405 struct ipr_ioarcb *ioarcb;
6406 struct ipr_cmnd *ipr_cmd;
6407 unsigned long hrrq_flags, lock_flags;
6409 struct ipr_hrr_queue *hrrq;
6412 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6414 scsi_cmd->result = (DID_OK << 16);
6415 res = scsi_cmd->device->hostdata;
/* SATA devices are routed through libata under the host lock. */
6417 if (ipr_is_gata(res) && res->sata_port) {
6418 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6419 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6420 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Pick an HRRQ (round-robin style index) for this command. */
6424 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6425 hrrq = &ioa_cfg->hrrq[hrrq_id];
6427 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6429 * We are currently blocking all devices due to a host reset
6430 * We have told the host to stop giving us new requests, but
6431 * ERP ops don't count. FIXME
6433 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6434 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6435 return SCSI_MLQUEUE_HOST_BUSY;
6439 * FIXME - Create scsi_set_host_offline interface
6440 * and the ioa_is_dead check can be removed
6442 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6443 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6447 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6448 if (ipr_cmd == NULL) {
6449 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6450 return SCSI_MLQUEUE_HOST_BUSY;
6452 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6454 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6455 ioarcb = &ipr_cmd->ioarcb;
6457 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6458 ipr_cmd->scsi_cmd = scsi_cmd;
6459 ipr_cmd->done = ipr_scsi_eh_done;
/* Generic SCSI: relax underflow checking when underflow is 0, and
 * request a post-reset delay once after a device reset. */
6461 if (ipr_is_gscsi(res)) {
6462 if (scsi_cmd->underflow == 0)
6463 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6465 if (res->reset_occurred) {
6466 res->reset_occurred = 0;
6467 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
/* Task attribute: simple tag if the midlayer tagged the command. */
6471 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6472 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6474 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6475 if (scsi_cmd->flags & SCMD_TAGGED)
6476 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6478 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
/* Vendor-unique opcodes (>= 0xC0) go to the IOA as adapter commands. */
6481 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6482 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6483 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6485 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6486 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6488 if (scsi_cmd->underflow == 0)
6489 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
/* Build the scatter/gather list (64- or 32-bit IOADL per SIS level). */
6493 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6495 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
/* Re-check adapter state under the lock before committing the op. */
6497 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6498 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6499 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6500 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6502 scsi_dma_unmap(scsi_cmd);
6503 return SCSI_MLQUEUE_HOST_BUSY;
6506 if (unlikely(hrrq->ioa_is_dead)) {
6507 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6508 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6509 scsi_dma_unmap(scsi_cmd);
6513 ioarcb->res_handle = res->res_handle;
6514 if (res->needs_sync_complete) {
6515 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6516 res->needs_sync_complete = 0;
6518 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6519 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6520 ipr_send_command(ipr_cmd);
6521 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Dead-IOA / no-resource path: fail the command with DID_NO_CONNECT. */
6525 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6526 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6527 scsi_cmd->result = (DID_NO_CONNECT << 16);
6528 scsi_cmd->scsi_done(scsi_cmd);
6529 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6534 * ipr_ioctl - IOCTL handler
6535 * @sdev: scsi device struct
6540 * 0 on success / other on failure
6542 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6544 struct ipr_resource_entry *res;
6546 res = (struct ipr_resource_entry *)sdev->hostdata;
/* Only SATA (GATA) resources are handled here, via libata's SAS
 * ioctl translation; HDIO_GET_IDENTITY is special-cased. */
6547 if (res && ipr_is_gata(res)) {
6548 if (cmd == HDIO_GET_IDENTITY)
6550 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6557 * ipr_info - Get information about the card/driver
6558 * @scsi_host: scsi host struct
6561 * pointer to buffer with description string
6563 static const char *ipr_ioa_info(struct Scsi_Host *host)
/* NOTE(review): static buffer is shared across hosts; formatting is
 * done under host_lock, matching the existing driver convention. */
6565 static char buffer[512];
6566 struct ipr_ioa_cfg *ioa_cfg;
6567 unsigned long lock_flags = 0;
6569 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6571 spin_lock_irqsave(host->host_lock, lock_flags);
6572 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6573 spin_unlock_irqrestore(host->host_lock, lock_flags);
/* SCSI midlayer host template: wires up queueing, error handling,
 * device lifecycle and sysfs attributes for this driver. */
6578 static struct scsi_host_template driver_template = {
6579 .module = THIS_MODULE,
6581 .info = ipr_ioa_info,
6583 .queuecommand = ipr_queuecommand,
6584 .eh_abort_handler = ipr_eh_abort,
6585 .eh_device_reset_handler = ipr_eh_dev_reset,
6586 .eh_host_reset_handler = ipr_eh_host_reset,
6587 .slave_alloc = ipr_slave_alloc,
6588 .slave_configure = ipr_slave_configure,
6589 .slave_destroy = ipr_slave_destroy,
6590 .scan_finished = ipr_scan_finished,
6591 .target_alloc = ipr_target_alloc,
6592 .target_destroy = ipr_target_destroy,
6593 .change_queue_depth = ipr_change_queue_depth,
6594 .bios_param = ipr_biosparam,
6595 .can_queue = IPR_MAX_COMMANDS,
6597 .sg_tablesize = IPR_MAX_SGLIST,
6598 .max_sectors = IPR_IOA_MAX_SECTORS,
6599 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6600 .use_clustering = ENABLE_CLUSTERING,
6601 .shost_attrs = ipr_ioa_attrs,
6602 .sdev_attrs = ipr_dev_attrs,
6603 .proc_name = IPR_NAME,
6607 * ipr_ata_phy_reset - libata phy_reset handler
6608 * @ap: ata port to reset
6611 static void ipr_ata_phy_reset(struct ata_port *ap)
6613 unsigned long flags;
6614 struct ipr_sata_port *sata_port = ap->private_data;
6615 struct ipr_resource_entry *res = sata_port->res;
6616 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
/* Wait out any in-progress adapter reset before touching the device;
 * the lock is dropped while sleeping on reset_wait_q. */
6620 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6621 while (ioa_cfg->in_reset_reload) {
6622 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6623 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6624 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6627 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6630 rc = ipr_device_reset(ioa_cfg, res);
6633 ap->link.device[0].class = ATA_DEV_NONE;
/* Publish the device class reported by the resource entry; unknown
 * collapses to "no device". */
6637 ap->link.device[0].class = res->ata_class;
6638 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6639 ap->link.device[0].class = ATA_DEV_NONE;
6642 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6647 * ipr_ata_post_internal - Cleanup after an internal command
6648 * @qc: ATA queued command
6653 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6655 struct ipr_sata_port *sata_port = qc->ap->private_data;
6656 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6657 struct ipr_cmnd *ipr_cmd;
6658 struct ipr_hrr_queue *hrrq;
6659 unsigned long flags;
/* Block until any adapter reset/reload finishes (lock dropped while
 * sleeping). */
6661 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6662 while (ioa_cfg->in_reset_reload) {
6663 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6664 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6665 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* If the qc is still pending on any HRRQ, reset the device to flush
 * it out. */
6668 for_each_hrrq(hrrq, ioa_cfg) {
6669 spin_lock(&hrrq->_lock);
6670 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6671 if (ipr_cmd->qc == qc) {
6672 ipr_device_reset(ioa_cfg, sata_port->res);
6676 spin_unlock(&hrrq->_lock);
6678 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6682 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6683 * @regs: destination
6684 * @tf: source ATA taskfile
6689 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6690 struct ata_taskfile *tf)
/* Straight field-for-field copy of the taskfile, including the HOB
 * (high-order byte) registers used by 48-bit LBA commands. */
6692 regs->feature = tf->feature;
6693 regs->nsect = tf->nsect;
6694 regs->lbal = tf->lbal;
6695 regs->lbam = tf->lbam;
6696 regs->lbah = tf->lbah;
6697 regs->device = tf->device;
6698 regs->command = tf->command;
6699 regs->hob_feature = tf->hob_feature;
6700 regs->hob_nsect = tf->hob_nsect;
6701 regs->hob_lbal = tf->hob_lbal;
6702 regs->hob_lbam = tf->hob_lbam;
6703 regs->hob_lbah = tf->hob_lbah;
6704 regs->ctl = tf->ctl;
6708 * ipr_sata_done - done function for SATA commands
6709 * @ipr_cmd: ipr command struct
6711 * This function is invoked by the interrupt handler for
6712 * ops generated by the SCSI mid-layer to SATA devices
6717 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6719 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6720 struct ata_queued_cmd *qc = ipr_cmd->qc;
6721 struct ipr_sata_port *sata_port = qc->ap->private_data;
6722 struct ipr_resource_entry *res = sata_port->res;
6723 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Stash the GATA status registers from the (SIS64 or legacy) IOASA
 * so ipr_qc_fill_rtf() can reconstruct the result taskfile later. */
6725 spin_lock(&ipr_cmd->hrrq->_lock);
6726 if (ipr_cmd->ioa_cfg->sis64)
6727 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6728 sizeof(struct ipr_ioasa_gata));
6730 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6731 sizeof(struct ipr_ioasa_gata));
6732 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6734 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6735 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
/* Serious errors always mark the qc failed; otherwise derive the
 * error mask from the ATA status byte. */
6737 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6738 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6740 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6741 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6742 spin_unlock(&ipr_cmd->hrrq->_lock);
6743 ata_qc_complete(qc);
6747 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6748 * @ipr_cmd: ipr command struct
6749 * @qc: ATA queued command
6752 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6753 struct ata_queued_cmd *qc)
6755 u32 ioadl_flags = 0;
6756 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6757 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6758 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6759 int len = qc->nbytes;
6760 struct scatterlist *sg;
6762 dma_addr_t dma_addr = ipr_cmd->dma_addr;
/* Choose read/write descriptor flags from the qc DMA direction. */
6767 if (qc->dma_dir == DMA_TO_DEVICE) {
6768 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6769 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6770 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6771 ioadl_flags = IPR_IOADL_FLAGS_READ;
6773 ioarcb->data_transfer_length = cpu_to_be32(len);
6775 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
/* Descriptors live in the command block's ata_ioadl area; give the
 * adapter its bus address. */
6776 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6777 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
/* One 64-bit descriptor per SG element; remember the last to tag it. */
6779 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6780 ioadl64->flags = cpu_to_be32(ioadl_flags);
6781 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6782 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6784 last_ioadl64 = ioadl64;
6788 if (likely(last_ioadl64))
6789 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6793 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6794 * @ipr_cmd: ipr command struct
6795 * @qc: ATA queued command
6798 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6799 struct ata_queued_cmd *qc)
6801 u32 ioadl_flags = 0;
6802 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6803 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6804 struct ipr_ioadl_desc *last_ioadl = NULL;
6805 int len = qc->nbytes;
6806 struct scatterlist *sg;
/* Legacy (32-bit IOADL) variant: direction selects which length and
 * ioadl-len fields of the IOARCB are populated. */
6812 if (qc->dma_dir == DMA_TO_DEVICE) {
6813 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6814 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6815 ioarcb->data_transfer_length = cpu_to_be32(len);
6817 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6818 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6819 ioadl_flags = IPR_IOADL_FLAGS_READ;
6820 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6821 ioarcb->read_ioadl_len =
6822 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* One descriptor per SG element; the last gets the LAST flag. */
6825 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6826 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6827 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6833 if (likely(last_ioadl))
6834 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6838 * ipr_qc_defer - Get a free ipr_cmd
6839 * @qc: queued command
6844 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6846 struct ata_port *ap = qc->ap;
6847 struct ipr_sata_port *sata_port = ap->private_data;
6848 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6849 struct ipr_cmnd *ipr_cmd;
6850 struct ipr_hrr_queue *hrrq;
6853 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6854 hrrq = &ioa_cfg->hrrq[hrrq_id];
/* Reserve a free command block now (stashed in qc->lldd_task) so
 * ipr_qc_issue() cannot fail to allocate one later. */
6856 qc->lldd_task = NULL;
6857 spin_lock(&hrrq->_lock);
6858 if (unlikely(hrrq->ioa_is_dead)) {
6859 spin_unlock(&hrrq->_lock);
6863 if (unlikely(!hrrq->allow_cmds)) {
6864 spin_unlock(&hrrq->_lock);
6865 return ATA_DEFER_LINK;
6868 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6869 if (ipr_cmd == NULL) {
6870 spin_unlock(&hrrq->_lock);
6871 return ATA_DEFER_LINK;
6874 qc->lldd_task = ipr_cmd;
6875 spin_unlock(&hrrq->_lock);
6880 * ipr_qc_issue - Issue a SATA qc to a device
6881 * @qc: queued command
6886 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6888 struct ata_port *ap = qc->ap;
6889 struct ipr_sata_port *sata_port = ap->private_data;
6890 struct ipr_resource_entry *res = sata_port->res;
6891 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6892 struct ipr_cmnd *ipr_cmd;
6893 struct ipr_ioarcb *ioarcb;
6894 struct ipr_ioarcb_ata_regs *regs;
/* The command block was pre-reserved by ipr_qc_defer(). */
6896 if (qc->lldd_task == NULL)
6899 ipr_cmd = qc->lldd_task;
6900 if (ipr_cmd == NULL)
6901 return AC_ERR_SYSTEM;
6903 qc->lldd_task = NULL;
6904 spin_lock(&ipr_cmd->hrrq->_lock);
6905 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6906 ipr_cmd->hrrq->ioa_is_dead)) {
6907 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6908 spin_unlock(&ipr_cmd->hrrq->_lock);
6909 return AC_ERR_SYSTEM;
6912 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6913 ioarcb = &ipr_cmd->ioarcb;
/* SIS64 keeps the ATA register block in the command's ata_ioadl area;
 * legacy adapters use the IOARCB's inline add_data area. */
6915 if (ioa_cfg->sis64) {
6916 regs = &ipr_cmd->i.ata_ioadl.regs;
6917 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6919 regs = &ioarcb->u.add_data.u.regs;
6921 memset(regs, 0, sizeof(*regs));
6922 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6924 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6926 ipr_cmd->done = ipr_sata_done;
6927 ipr_cmd->ioarcb.res_handle = res->res_handle;
6928 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6929 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6930 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6931 ipr_cmd->dma_use_sg = qc->n_elem;
6934 ipr_build_ata_ioadl64(ipr_cmd, qc);
6936 ipr_build_ata_ioadl(ipr_cmd, qc);
6938 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6939 ipr_copy_sata_tf(regs, &qc->tf);
6940 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6941 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
/* Translate the ATA protocol into IOA flag bits; unsupported
 * protocols release the command block and fail the qc. */
6943 switch (qc->tf.protocol) {
6944 case ATA_PROT_NODATA:
6949 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6952 case ATAPI_PROT_PIO:
6953 case ATAPI_PROT_NODATA:
6954 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6957 case ATAPI_PROT_DMA:
6958 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6959 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6964 spin_unlock(&ipr_cmd->hrrq->_lock);
6965 return AC_ERR_INVALID;
6968 ipr_send_command(ipr_cmd);
6969 spin_unlock(&ipr_cmd->hrrq->_lock);
6975 * ipr_qc_fill_rtf - Read result TF
6976 * @qc: ATA queued command
6981 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6983 struct ipr_sata_port *sata_port = qc->ap->private_data;
6984 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6985 struct ata_taskfile *tf = &qc->result_tf;
/* Rebuild the result taskfile from the GATA registers saved by
 * ipr_sata_done(); status maps to the command register. */
6987 tf->feature = g->error;
6988 tf->nsect = g->nsect;
6992 tf->device = g->device;
6993 tf->command = g->status;
6994 tf->hob_nsect = g->hob_nsect;
6995 tf->hob_lbal = g->hob_lbal;
6996 tf->hob_lbam = g->hob_lbam;
6997 tf->hob_lbah = g->hob_lbah;
/* libata port operations backed by the IOA's ATA pass-through path. */
7002 static struct ata_port_operations ipr_sata_ops = {
7003 .phy_reset = ipr_ata_phy_reset,
7004 .hardreset = ipr_sata_reset,
7005 .post_internal_cmd = ipr_ata_post_internal,
7006 .qc_prep = ata_noop_qc_prep,
7007 .qc_defer = ipr_qc_defer,
7008 .qc_issue = ipr_qc_issue,
7009 .qc_fill_rtf = ipr_qc_fill_rtf,
7010 .port_start = ata_sas_port_start,
7011 .port_stop = ata_sas_port_stop
/* Port capabilities advertised to libata: SATA with PIO4, MWDMA2 and
 * up to UDMA6 transfer modes. */
7014 static struct ata_port_info sata_port_info = {
7015 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7017 .pio_mask = ATA_PIO4_ONLY,
7018 .mwdma_mask = ATA_MWDMA2,
7019 .udma_mask = ATA_UDMA6,
7020 .port_ops = &ipr_sata_ops
/* PVR values of processors on which early Gemstone adapters are
 * blocked; table entries omitted in this extraction. */
7023 #ifdef CONFIG_PPC_PSERIES
7024 static const u16 ipr_blocked_processors[] = {
7036 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7037 * @ioa_cfg: ioa cfg struct
7039 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7040 * certain pSeries hardware. This function determines if the given
7041 * adapter is in one of these confgurations or not.
7044 * 1 if adapter is not supported / 0 if adapter is supported
7046 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
/* Only type 0x5702 adapters with PCI revision < 4 are candidates;
 * then match the running processor PVR against the blocked list. */
7050 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7051 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7052 if (pvr_version_is(ipr_blocked_processors[i]))
/* Non-pSeries builds: every adapter is considered supported. */
7059 #define ipr_invalid_adapter(ioa_cfg) 0
7063 * ipr_ioa_bringdown_done - IOA bring down completion.
7064 * @ipr_cmd: ipr command struct
7066 * This function processes the completion of an adapter bring down.
7067 * It wakes any reset sleepers.
7072 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7074 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Unless the IOA is being removed, let the midlayer resume issuing
 * requests (host_lock must be dropped around scsi_unblock_requests). */
7078 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7080 spin_unlock_irq(ioa_cfg->host->host_lock);
7081 scsi_unblock_requests(ioa_cfg->host);
7082 spin_lock_irq(ioa_cfg->host->host_lock);
7085 ioa_cfg->in_reset_reload = 0;
7086 ioa_cfg->reset_retries = 0;
/* Mark every HRRQ dead under its own lock. */
7087 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7088 spin_lock(&ioa_cfg->hrrq[i]._lock);
7089 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7090 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7094 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7095 wake_up_all(&ioa_cfg->reset_wait_q);
7098 return IPR_RC_JOB_RETURN;
/*
7102 * ipr_ioa_reset_done - IOA reset completion.
7103 * @ipr_cmd: ipr command struct
7105 * This function processes the completion of an adapter reset.
7106 * It schedules any necessary mid-layer add/removes and
7107 * wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
*/
7112 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7114 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7115 struct ipr_resource_entry *res;
/* Reset is over: re-enable command processing on every HRR queue. */
7119 ioa_cfg->in_reset_reload = 0;
7120 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7121 spin_lock(&ioa_cfg->hrrq[j]._lock);
7122 ioa_cfg->hrrq[j].allow_cmds = 1;
7123 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7126 ioa_cfg->reset_cmd = NULL;
7127 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
/* Kick the worker if any resource needs a mid-layer add or remove. */
7129 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7130 if (res->add_to_ml || res->del_from_ml) {
7135 schedule_work(&ioa_cfg->work_q);
/* Re-arm the HCAMs: the first IPR_NUM_LOG_HCAMS carry error log data,
 * the remainder carry configuration-change notifications. */
7137 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7138 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7139 if (j < IPR_NUM_LOG_HCAMS)
7140 ipr_send_hcam(ioa_cfg,
7141 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7142 ioa_cfg->hostrcb[j]);
7144 ipr_send_hcam(ioa_cfg,
7145 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7146 ioa_cfg->hostrcb[j]);
7149 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7150 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7152 ioa_cfg->reset_retries = 0;
7153 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7154 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop the host lock while the mid-layer resumes request flow. */
7156 spin_unlock(ioa_cfg->host->host_lock);
7157 scsi_unblock_requests(ioa_cfg->host);
7158 spin_lock(ioa_cfg->host->host_lock);
/* Re-block if commands were disallowed again while unlocked. */
7160 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
7161 scsi_block_requests(ioa_cfg->host);
7163 schedule_work(&ioa_cfg->work_q);
7165 return IPR_RC_JOB_RETURN;
7169 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7170 * @supported_dev: supported device struct
7171 * @vpids: vendor product id struct
7176 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7177 struct ipr_std_inq_vpids *vpids)
7179 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7180 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7181 supported_dev->num_records = 1;
7182 supported_dev->data_length =
7183 cpu_to_be16(sizeof(struct ipr_supported_device));
7184 supported_dev->reserved = 0;
/*
7188 * ipr_set_supported_devs - Send Set Supported Devices for a device
7189 * @ipr_cmd: ipr command struct
7191 * This function sends a Set Supported Devices to the adapter
7194 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
*/
7196 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7198 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7199 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7200 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7201 struct ipr_resource_entry *res = ipr_cmd->u.res;
/* Default next step; overridden below while disks remain to process. */
7203 ipr_cmd->job_step = ipr_ioa_reset_done;
/* Resume the walk at the resource saved in ipr_cmd->u.res and issue one
 * Set Supported Devices command for the next SCSI disk found. */
7205 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7206 if (!ipr_is_scsi_disk(res))
7209 ipr_cmd->u.res = res;
7210 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7212 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7213 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7214 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7216 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7217 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
/* CDB bytes 7-8: transfer length, MSB first. */
7218 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7219 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7221 ipr_init_ioadl(ipr_cmd,
7222 ioa_cfg->vpd_cbs_dma +
7223 offsetof(struct ipr_misc_cbs, supp_dev),
7224 sizeof(struct ipr_supported_device),
7225 IPR_IOADL_FLAGS_WRITE_LAST);
7227 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7228 IPR_SET_SUP_DEVICE_TIMEOUT);
/* On non-sis64 adapters, re-enter this function for the next disk. */
7230 if (!ioa_cfg->sis64)
7231 ipr_cmd->job_step = ipr_set_supported_devs;
7233 return IPR_RC_JOB_RETURN;
7237 return IPR_RC_JOB_CONTINUE;
/*
7241 * ipr_get_mode_page - Locate specified mode page
7242 * @mode_pages: mode page buffer
7243 * @page_code: page code to find
7244 * @len: minimum required length for mode page
7247 * pointer to mode page / NULL on failure
*/
7249 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7250 u32 page_code, u32 len)
7252 struct ipr_mode_page_hdr *mode_hdr;
/* Reject an absent or empty mode page buffer. */
7256 if (!mode_pages || (mode_pages->hdr.length == 0))
/* Usable payload: mode data length excludes its own byte (+1), minus the
 * remaining 3 header bytes and any block descriptors (per SPC). */
7259 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7260 mode_hdr = (struct ipr_mode_page_hdr *)
7261 (mode_pages->data + mode_pages->hdr.block_desc_len);
/* Walk the page headers, matching page code and minimum length. */
7264 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7265 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
/* Not this page: advance past it to the next header. */
7269 page_length = (sizeof(struct ipr_mode_page_hdr) +
7270 mode_hdr->page_length);
7271 length -= page_length;
7272 mode_hdr = (struct ipr_mode_page_hdr *)
7273 ((unsigned long)mode_hdr + page_length);
/*
7280 * ipr_check_term_power - Check for term power errors
7281 * @ioa_cfg: ioa config struct
7282 * @mode_pages: IOAFP mode pages buffer
7284 * Check the IOAFP's mode page 28 for term power errors
 *
 * Return value:
 * 	none
*/
7289 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7290 struct ipr_mode_pages *mode_pages)
7294 struct ipr_dev_bus_entry *bus;
7295 struct ipr_mode_page28 *mode_page;
7297 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7298 sizeof(struct ipr_mode_page28));
7300 entry_length = mode_page->entry_length;
7302 bus = mode_page->bus;
/* Log an error for every bus entry reporting missing termination power. */
7304 for (i = 0; i < mode_page->num_entries; i++) {
7305 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7306 dev_err(&ioa_cfg->pdev->dev,
7307 "Term power is absent on scsi bus %d\n",
/* Bus entries are variable-length; step by the reported entry_length. */
7311 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
/*
7316 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7317 * @ioa_cfg: ioa config struct
7319 * Looks through the config table checking for SES devices. If
7320 * the SES device is in the SES table indicating a maximum SCSI
7321 * bus speed, the speed is limited for the bus.
 *
 * Return value:
 * 	none
*/
7326 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7331 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7332 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7333 ioa_cfg->bus_attr[i].bus_width);
/* Only ever lower the configured rate, never raise it. */
7335 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7336 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
/*
7341 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7342 * @ioa_cfg: ioa config struct
7343 * @mode_pages: mode page 28 buffer
7345 * Updates mode page 28 based on driver configuration
 *
 * Return value:
 * 	none
*/
7350 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7351 struct ipr_mode_pages *mode_pages)
7353 int i, entry_length;
7354 struct ipr_dev_bus_entry *bus;
7355 struct ipr_bus_attributes *bus_attr;
7356 struct ipr_mode_page28 *mode_page;
7358 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7359 sizeof(struct ipr_mode_page28));
7361 entry_length = mode_page->entry_length;
7363 /* Loop for each device bus entry */
7364 for (i = 0, bus = mode_page->bus;
7365 i < mode_page->num_entries;
7366 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
/* NOTE(review): boundary test uses '>', so res_addr.bus ==
 * IPR_MAX_NUM_BUSES is accepted -- confirm bus numbering convention. */
7367 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7368 dev_err(&ioa_cfg->pdev->dev,
7369 "Invalid resource address reported: 0x%08X\n",
7370 IPR_GET_PHYS_LOC(bus->res_addr));
/* Push the driver's configured attributes into the page. */
7374 bus_attr = &ioa_cfg->bus_attr[i];
7375 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7376 bus->bus_width = bus_attr->bus_width;
7377 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7378 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7379 if (bus_attr->qas_enabled)
7380 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7382 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7387 * ipr_build_mode_select - Build a mode select command
7388 * @ipr_cmd: ipr command struct
7389 * @res_handle: resource handle to send command to
7390 * @parm: Byte 2 of Mode Sense command
7391 * @dma_addr: DMA buffer address
7392 * @xfer_len: data transfer length
7397 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7398 __be32 res_handle, u8 parm,
7399 dma_addr_t dma_addr, u8 xfer_len)
7401 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7403 ioarcb->res_handle = res_handle;
7404 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7405 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7406 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7407 ioarcb->cmd_pkt.cdb[1] = parm;
7408 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7410 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
/*
7414 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7415 * @ipr_cmd: ipr command struct
7417 * This function sets up the SCSI bus attributes and sends
7418 * a Mode Select for Page 28 to activate them.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
*/
7423 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7425 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7426 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7430 ipr_scsi_bus_speed_limit(ioa_cfg);
7431 ipr_check_term_power(ioa_cfg, mode_pages);
7432 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
/* The mode data length byte is reserved (zero) in a MODE SELECT
 * parameter list, per SPC. */
7433 length = mode_pages->hdr.length + 1;
7434 mode_pages->hdr.length = 0;
7436 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7437 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
/* Next step: tell the adapter which attached devices are supported. */
7440 ipr_cmd->job_step = ipr_set_supported_devs;
7441 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7442 struct ipr_resource_entry, queue);
7443 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7446 return IPR_RC_JOB_RETURN;
7450 * ipr_build_mode_sense - Builds a mode sense command
7451 * @ipr_cmd: ipr command struct
7452 * @res: resource entry struct
7453 * @parm: Byte 2 of mode sense command
7454 * @dma_addr: DMA address of mode sense buffer
7455 * @xfer_len: Size of DMA buffer
7460 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7462 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7464 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7466 ioarcb->res_handle = res_handle;
7467 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7468 ioarcb->cmd_pkt.cdb[2] = parm;
7469 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7470 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7472 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7476 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7477 * @ipr_cmd: ipr command struct
7479 * This function handles the failure of an IOA bringup command.
7484 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7486 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7487 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7489 dev_err(&ioa_cfg->pdev->dev,
7490 "0x%02X failed with IOASC: 0x%08X\n",
7491 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7493 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7494 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7495 return IPR_RC_JOB_RETURN;
/*
7499 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7500 * @ipr_cmd: ipr command struct
7502 * This function handles the failure of a Mode Sense to the IOAFP.
7503 * Some adapters do not handle all mode pages.
7506 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
*/
7508 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7510 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7511 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Page 28 unsupported: skip straight to Set Supported Devices. */
7513 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7514 ipr_cmd->job_step = ipr_set_supported_devs;
7515 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7516 struct ipr_resource_entry, queue);
7517 return IPR_RC_JOB_CONTINUE;
/* Anything else is a genuine failure. */
7520 return ipr_reset_cmd_failed(ipr_cmd);
/*
7524 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7525 * @ipr_cmd: ipr command struct
7527 * This function sends a Page 28 mode sense to the IOA to
7528 * retrieve SCSI bus attributes.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
*/
7533 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7535 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7538 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7539 0x28, ioa_cfg->vpd_cbs_dma +
7540 offsetof(struct ipr_misc_cbs, mode_pages),
7541 sizeof(struct ipr_mode_pages));
/* On success select page 28; a failure may just mean the page is
 * unsupported and is handled leniently. */
7543 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7544 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7546 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7549 return IPR_RC_JOB_RETURN;
7553 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7554 * @ipr_cmd: ipr command struct
7556 * This function enables dual IOA RAID support if possible.
7561 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7563 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7564 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7565 struct ipr_mode_page24 *mode_page;
7569 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7570 sizeof(struct ipr_mode_page24));
7573 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7575 length = mode_pages->hdr.length + 1;
7576 mode_pages->hdr.length = 0;
7578 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7579 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7582 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7583 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7586 return IPR_RC_JOB_RETURN;
7590 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7591 * @ipr_cmd: ipr command struct
7593 * This function handles the failure of a Mode Sense to the IOAFP.
7594 * Some adapters do not handle all mode pages.
7597 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7599 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7601 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7603 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7604 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7605 return IPR_RC_JOB_CONTINUE;
7608 return ipr_reset_cmd_failed(ipr_cmd);
/*
7612 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7613 * @ipr_cmd: ipr command struct
7615 * This function sends a mode sense to the IOA to retrieve
7616 * the IOA Advanced Function Control mode page.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
*/
7621 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7623 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7626 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7627 0x24, ioa_cfg->vpd_cbs_dma +
7628 offsetof(struct ipr_misc_cbs, mode_pages),
7629 sizeof(struct ipr_mode_pages));
/* A failure here is tolerated -- some adapters lack page 24. */
7631 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7632 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7634 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7637 return IPR_RC_JOB_RETURN;
/*
7641 * ipr_init_res_table - Initialize the resource table
7642 * @ipr_cmd: ipr command struct
7644 * This function looks through the existing resource table, comparing
7645 * it with the config table. This function will take care of old/new
7646 * devices and schedule adding/removing them from the mid-layer
7650 * IPR_RC_JOB_CONTINUE
*/
7652 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7654 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7655 struct ipr_resource_entry *res, *temp;
7656 struct ipr_config_table_entry_wrapper cfgtew;
7657 int entries, found, flag, i;
/* 64-bit and 32-bit SIS adapters use different config table layouts. */
7662 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7664 flag = ioa_cfg->u.cfg_table->hdr.flags;
7666 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7667 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
/* Park all currently-known resources on old_res; they are re-adopted
 * below as they are matched against the freshly fetched config table. */
7669 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7670 list_move_tail(&res->queue, &old_res)
7673 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7675 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7677 for (i = 0; i < entries; i++) {
7679 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7681 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
/* Known device: move it back to the used list. */
7684 list_for_each_entry_safe(res, temp, &old_res, queue) {
7685 if (ipr_is_same_device(res, &cfgtew)) {
7686 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
/* New device: claim a free resource entry and initialize it. */
7693 if (list_empty(&ioa_cfg->free_res_q)) {
7694 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7699 res = list_entry(ioa_cfg->free_res_q.next,
7700 struct ipr_resource_entry, queue);
7701 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7702 ipr_init_res_entry(res, &cfgtew);
7704 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7705 res->sdev->allow_restart = 1;
7708 ipr_update_res_entry(res, &cfgtew);
/* Anything left on old_res disappeared from the config table: either
 * schedule mid-layer removal or release the entry outright. */
7711 list_for_each_entry_safe(res, temp, &old_res, queue) {
7713 res->del_from_ml = 1;
7714 res->res_handle = IPR_INVALID_RES_HANDLE;
7715 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7719 list_for_each_entry_safe(res, temp, &old_res, queue) {
7720 ipr_clear_res_target(res);
7721 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
/* Fetch page 24 only when dual-IOA RAID is possible and enabled. */
7724 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7725 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7727 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7730 return IPR_RC_JOB_CONTINUE;
/*
7734 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7735 * @ipr_cmd: ipr command struct
7737 * This function sends a Query IOA Configuration command
7738 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
*/
7743 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7745 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7746 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7747 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7748 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
/* Capability bit was fetched by the earlier page 0xD0 inquiry. */
7751 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7752 ioa_cfg->dual_raid = 1;
7753 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7754 ucode_vpd->major_release, ucode_vpd->card_type,
7755 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7756 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7757 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
/* CDB bytes 6-8 carry the allocation length, MSB first. */
7759 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7760 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7761 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7762 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7764 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7765 IPR_IOADL_FLAGS_READ_LAST);
7767 ipr_cmd->job_step = ipr_init_res_table;
7769 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7772 return IPR_RC_JOB_RETURN;
7775 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7777 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7779 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7780 return IPR_RC_JOB_CONTINUE;
7782 return ipr_reset_cmd_failed(ipr_cmd);
7785 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7786 __be32 res_handle, u8 sa_code)
7788 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7790 ioarcb->res_handle = res_handle;
7791 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7792 ioarcb->cmd_pkt.cdb[1] = sa_code;
7793 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/*
7797 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
 * action to the adapter, when the capability was reported.
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
*/
7803 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7805 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7806 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7807 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7811 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
/* Only issue the service action if page 0xC4 reported sync-cache support. */
7813 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7814 ipr_build_ioa_service_action(ipr_cmd,
7815 cpu_to_be32(IPR_IOA_RES_HANDLE),
7816 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
/* NOTE(review): 0x40 presumably selects which cache parameter to
 * change -- confirm against the IPR service action specification. */
7818 ioarcb->cmd_pkt.cdb[2] = 0x40;
7820 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7821 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7822 IPR_SET_SUP_DEVICE_TIMEOUT);
7825 return IPR_RC_JOB_RETURN;
7829 return IPR_RC_JOB_CONTINUE;
/*
7833 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7834 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (placed in CDB byte 1)
 * @page: inquiry page code (CDB byte 2)
 * @dma_addr: DMA address of the response buffer
 * @xfer_len: allocation length
7836 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 * 	none
*/
7841 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7842 dma_addr_t dma_addr, u8 xfer_len)
7844 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7847 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7848 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7850 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7851 ioarcb->cmd_pkt.cdb[1] = flags;
7852 ioarcb->cmd_pkt.cdb[2] = page;
7853 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7855 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7857 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7862 * ipr_inquiry_page_supported - Is the given inquiry page supported
7863 * @page0: inquiry page 0 buffer
7866 * This function determines if the specified inquiry page is supported.
7869 * 1 if page is supported / 0 if not
7871 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7875 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7876 if (page0->page[i] == page)
/*
7883 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7884 * @ipr_cmd: ipr command struct
7886 * This function sends a Page 0xC4 inquiry to the adapter
7887 * to retrieve software VPD information.
7890 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
*/
7892 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
7894 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7895 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7896 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7899 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
/* Zero stale data so the next step sees no capabilities if the page
 * turns out to be unsupported. */
7900 memset(pageC4, 0, sizeof(*pageC4));
7902 if (ipr_inquiry_page_supported(page0, 0xC4)) {
7903 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
7904 (ioa_cfg->vpd_cbs_dma
7905 + offsetof(struct ipr_misc_cbs,
7907 sizeof(struct ipr_inquiry_pageC4));
7908 return IPR_RC_JOB_RETURN;
7912 return IPR_RC_JOB_CONTINUE;
/*
7916 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7917 * @ipr_cmd: ipr command struct
7919 * This function sends a Page 0xD0 inquiry to the adapter
7920 * to retrieve adapter capabilities.
7923 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
*/
7925 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7927 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7928 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7929 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7932 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
/* Zero the capability block so unsupported adapters read as "no caps". */
7933 memset(cap, 0, sizeof(*cap));
7935 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7936 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7937 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7938 sizeof(struct ipr_inquiry_cap));
7939 return IPR_RC_JOB_RETURN;
7943 return IPR_RC_JOB_CONTINUE;
/*
7947 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7948 * @ipr_cmd: ipr command struct
7950 * This function sends a Page 3 inquiry to the adapter
7951 * to retrieve software VPD information.
7954 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
*/
7956 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7958 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Page 3 holds the firmware version later reported by query_ioa_cfg. */
7962 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7964 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7965 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7966 sizeof(struct ipr_inquiry_page3));
7969 return IPR_RC_JOB_RETURN;
/*
7973 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7974 * @ipr_cmd: ipr command struct
7976 * This function sends a Page 0 inquiry to the adapter
7977 * to retrieve supported inquiry pages.
7980 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
*/
7982 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7984 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7989 /* Grab the type out of the VPD and store it away */
7990 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7992 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
/* Certain adapter/processor combinations are known-bad; unless running
 * in test mode, take the adapter down. */
7994 if (ipr_invalid_adapter(ioa_cfg)) {
7995 dev_err(&ioa_cfg->pdev->dev,
7996 "Adapter not supported in this hardware configuration.\n");
7998 if (!ipr_testmode) {
7999 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8000 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8001 list_add_tail(&ipr_cmd->queue,
8002 &ioa_cfg->hrrq->hrrq_free_q);
8003 return IPR_RC_JOB_RETURN;
8007 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8009 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8010 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8011 sizeof(struct ipr_inquiry_page0));
8014 return IPR_RC_JOB_RETURN;
8018 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8019 * @ipr_cmd: ipr command struct
8021 * This function sends a standard inquiry to the adapter.
8026 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8028 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8031 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8033 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8034 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8035 sizeof(struct ipr_ioa_vpd));
8038 return IPR_RC_JOB_RETURN;
/*
8042 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8043 * @ipr_cmd: ipr command struct
8045 * This function sends an Identify Host Request Response Queue
8046 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
*/
8051 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8053 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8054 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8055 struct ipr_hrr_queue *hrrq;
8058 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8059 if (ioa_cfg->identify_hrrq_index == 0)
8060 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
/* One Identify HRRQ command is issued per host RRQ; this function
 * re-queues itself until every queue has been identified. */
8062 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8063 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8065 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8066 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8068 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8070 ioarcb->cmd_pkt.cdb[1] = 0x1;
/* Multi-vector adapters use the HRRQ-select extension bit. */
8072 if (ioa_cfg->nvectors == 1)
8073 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8075 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
/* CDB bytes 2-5: low 32 bits of the queue's DMA address;
 * bytes 7-8: queue size in bytes (entries are u32). */
8077 ioarcb->cmd_pkt.cdb[2] =
8078 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8079 ioarcb->cmd_pkt.cdb[3] =
8080 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8081 ioarcb->cmd_pkt.cdb[4] =
8082 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8083 ioarcb->cmd_pkt.cdb[5] =
8084 ((u64) hrrq->host_rrq_dma) & 0xff;
8085 ioarcb->cmd_pkt.cdb[7] =
8086 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8087 ioarcb->cmd_pkt.cdb[8] =
8088 (sizeof(u32) * hrrq->size) & 0xff;
8090 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8091 ioarcb->cmd_pkt.cdb[9] =
8092 ioa_cfg->identify_hrrq_index;
/* 64-bit SIS: upper 32 address bits go in CDB bytes 10-13. */
8094 if (ioa_cfg->sis64) {
8095 ioarcb->cmd_pkt.cdb[10] =
8096 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8097 ioarcb->cmd_pkt.cdb[11] =
8098 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8099 ioarcb->cmd_pkt.cdb[12] =
8100 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8101 ioarcb->cmd_pkt.cdb[13] =
8102 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8105 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8106 ioarcb->cmd_pkt.cdb[14] =
8107 ioa_cfg->identify_hrrq_index;
8109 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8110 IPR_INTERNAL_TIMEOUT);
/* More queues pending: schedule this step again. */
8112 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8113 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8116 return IPR_RC_JOB_RETURN;
8120 return IPR_RC_JOB_CONTINUE;
/*
8124 * ipr_reset_timer_done - Adapter reset timer function
8125 * @ipr_cmd: ipr command struct
8127 * Description: This function is used in adapter reset processing
8128 * for timing events. If the reset_cmd pointer in the IOA
8129 * config struct is not this adapter's we are doing nested
8130 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
*/
8136 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8138 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8139 unsigned long lock_flags = 0;
8141 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only advance the job if this command still owns the reset. */
8143 if (ioa_cfg->reset_cmd == ipr_cmd) {
8144 list_del(&ipr_cmd->queue);
8145 ipr_cmd->done(ipr_cmd);
8148 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
8152 * ipr_reset_start_timer - Start a timer for adapter reset job
8153 * @ipr_cmd: ipr command struct
8154 * @timeout: timeout value
8156 * Description: This function is used in adapter reset processing
8157 * for timing events. If the reset_cmd pointer in the IOA
8158 * config struct is not this adapter's we are doing nested
8159 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
*/
8165 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8166 unsigned long timeout)
/* Park the command on the pending queue; when the timer fires,
 * ipr_reset_timer_done re-enters the reset job via ->done. */
8170 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8171 ipr_cmd->done = ipr_reset_ioa_job;
8173 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8174 ipr_cmd->timer.expires = jiffies + timeout;
8175 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8176 add_timer(&ipr_cmd->timer);
/*
8180 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8181 * @ioa_cfg: ioa cfg struct
 *
 * Return value:
 * 	none
*/
8186 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8188 struct ipr_hrr_queue *hrrq;
/* Reset every host RRQ: clear its entries and rewind the pointers. */
8190 for_each_hrrq(hrrq, ioa_cfg) {
8191 spin_lock(&hrrq->_lock);
8192 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8194 /* Initialize Host RRQ pointers */
8195 hrrq->hrrq_start = hrrq->host_rrq;
8196 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8197 hrrq->hrrq_curr = hrrq->hrrq_start;
8198 hrrq->toggle_bit = 1;
8199 spin_unlock(&hrrq->_lock);
8203 ioa_cfg->identify_hrrq_index = 0;
/* NOTE(review): with multiple queues, dispatch starts at index 1 --
 * presumably queue 0 is reserved; confirm against the dispatch code. */
8204 if (ioa_cfg->hrrq_num == 1)
8205 atomic_set(&ioa_cfg->hrrq_index, 0);
8207 atomic_set(&ioa_cfg->hrrq_index, 1);
8209 /* Zero out config table */
8210 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
/*
8214 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8215 * @ipr_cmd: ipr command struct
8218 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
*/
8220 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8222 unsigned long stage, stage_time;
8224 volatile u32 int_reg;
8225 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* The feedback register packs the current IPL stage and its expected
 * duration (seconds) into one word. */
8228 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8229 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8230 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8232 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8234 /* sanity check the stage_time value */
8235 if (stage_time == 0)
8236 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8237 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8238 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8239 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8240 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
/* Unknown stage: mask stage-change interrupts and wait for transition
 * to operational using the configured transop timeout. */
8242 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8243 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
/* Read back to flush the posted MMIO write. */
8244 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8245 stage_time = ioa_cfg->transop_timeout;
8246 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8247 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8248 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
/* Already operational: mask further stage/transition interrupts and
 * continue the job chain immediately. */
8249 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8250 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8251 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8252 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8253 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8254 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8255 return IPR_RC_JOB_CONTINUE;
/* Otherwise arm a timer for the stage's advertised duration and
 * re-enter the reset job when it fires or the stage changes. */
8259 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8260 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8261 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8262 ipr_cmd->done = ipr_reset_ioa_job;
8263 add_timer(&ipr_cmd->timer);
8265 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8267 return IPR_RC_JOB_RETURN;
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd: ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;
	volatile u64 maskval;

	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	/* Re-enable interrupt handling on every HRRQ */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg); /* flush */

	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Already operational: just unmask error/HRRQ interrupts */
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);

	if (ioa_cfg->sis64) {
		maskval = IPR_PCII_IPL_STAGE_CHANGE;
		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);

	/* Read back to flush the posted interrupt-mask writes */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	if (ioa_cfg->sis64) {
		/* SIS64 adapters report IPL progress; poll it stage by stage */
		ipr_cmd->job_step = ipr_reset_next_stage;
		return IPR_RC_JOB_CONTINUE;

	/* SIS32: arm a single transition-to-operational timeout */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	return IPR_RC_JOB_RETURN;
8343 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8344 * @ipr_cmd: ipr command struct
8346 * This function is invoked when an adapter dump has run out
8347 * of processing time.
8350 * IPR_RC_JOB_CONTINUE
8352 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8354 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8356 if (ioa_cfg->sdt_state == GET_DUMP)
8357 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8358 else if (ioa_cfg->sdt_state == READ_DUMP)
8359 ioa_cfg->sdt_state = ABORT_DUMP;
8361 ioa_cfg->dump_timeout = 1;
8362 ipr_cmd->job_step = ipr_reset_alert;
8364 return IPR_RC_JOB_CONTINUE;
8368 * ipr_unit_check_no_data - Log a unit check/no data error log
8369 * @ioa_cfg: ioa config struct
8371 * Logs an error indicating the adapter unit checked, but for some
8372 * reason, we were unable to fetch the unit check buffer.
8377 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8379 ioa_cfg->errors_logged++;
8380 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg: ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 *	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	/* SIS32 adapters must present a format-2 SDT; bail out otherwise */
	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	/* SDT must be readable, its first entry valid, and ready to use */
	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		ipr_unit_check_no_data(ioa_cfg);

	/* Find length of the first sdt entry (UC buffer) */
	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
		length = be32_to_cpu(sdt.entry[0].end_token);
		/* fmt2: length is the token span masked to the mailbox range */
		length = (be32_to_cpu(sdt.entry[0].end_token) -
			  be32_to_cpu(sdt.entry[0].start_token)) &
			 IPR_FMT2_MBX_ADDR_MASK;

	/* Borrow a free host RCB to hold the unit check data */
	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del_init(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	/* Clock the buffer out of the adapter, capped to the hcam size */
	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].start_token),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	ipr_handle_log_data(ioa_cfg, hostrcb);
	ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
	/* A reset-required unit check during GET_DUMP waits for the dump */
	if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
	    ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
/**
 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
 * @ipr_cmd: ipr command struct
 *
 * Description: This function will call to get the unit check buffer.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Clear the flag so a subsequent unit check is detected anew */
	ioa_cfg->ioa_unit_checked = 0;
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	/* Zero delay: just reschedule the job outside this context */
	ipr_reset_start_timer(ipr_cmd, 0);

	return IPR_RC_JOB_RETURN;
/* Wait for the SIS64 mailbox register to become stable, then start
 * reading the adapter dump. Polls with u.time_left as the wait budget. */
static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Nothing to do if the dump request was cancelled meanwhile */
	if (ioa_cfg->sdt_state != GET_DUMP)
		return IPR_RC_JOB_RETURN;

	if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
	    (readl(ioa_cfg->regs.sense_interrupt_reg) &
	     IPR_PCII_MAILBOX_STABLE)) {

		if (!ipr_cmd->u.time_left)
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting for Mailbox register.\n");

		ioa_cfg->sdt_state = READ_DUMP;
		ioa_cfg->dump_timeout = 0;
		/* Bound the dump itself; SIS64 and SIS32 get different budgets */
		ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
		ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
		ipr_cmd->job_step = ipr_reset_wait_for_dump;
		/* The dump data is fetched from process context via work_q */
		schedule_work(&ioa_cfg->work_q);

		/* Not stable yet: burn one poll interval and retry */
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd,
				      IPR_CHECK_FOR_RESET_TIMEOUT);

	return IPR_RC_JOB_RETURN;
/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd: ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Mark state saved so pci_restore_state() actually restores it */
	ioa_cfg->pdev->state_saved = true;
	pci_restore_state(ioa_cfg->pdev);

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;

	/* Everything outstanding across the reset is failed back now */
	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg); /* flush */

	if (ioa_cfg->ioa_unit_checked) {
		if (ioa_cfg->sis64) {
			/* SIS64: delay before fetching the unit check buffer */
			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
			return IPR_RC_JOB_RETURN;
		ioa_cfg->ioa_unit_checked = 0;
		ipr_get_unit_check_buffer(ioa_cfg);
		ipr_cmd->job_step = ipr_reset_alert;
		ipr_reset_start_timer(ipr_cmd, 0);
		return IPR_RC_JOB_RETURN;

	/* Pick the next job step based on why we are resetting */
	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else if (ioa_cfg->sdt_state == GET_DUMP) {
		ipr_cmd->job_step = ipr_dump_mailbox_wait;
		ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
		ipr_cmd->job_step = ipr_reset_enable_ioa;

	return IPR_RC_JOB_CONTINUE;
8569 * ipr_reset_bist_done - BIST has completed on the adapter.
8570 * @ipr_cmd: ipr command struct
8572 * Description: Unblock config space and resume the reset process.
8575 * IPR_RC_JOB_CONTINUE
8577 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8579 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8582 if (ioa_cfg->cfg_locked)
8583 pci_cfg_access_unlock(ioa_cfg->pdev);
8584 ioa_cfg->cfg_locked = 0;
8585 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8587 return IPR_RC_JOB_CONTINUE;
/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd: ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = PCIBIOS_SUCCESSFUL;

	/* Some chips start BIST via MMIO, others via PCI config space */
	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
		writel(IPR_UPROCI_SIS64_START_BIST,
		       ioa_cfg->regs.set_uproc_interrupt_reg32);
		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc == PCIBIOS_SUCCESSFUL) {
		/* BIST kicked off: wait out its run time */
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
		/* Config write failed: release config access, report error */
		if (ioa_cfg->cfg_locked)
			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
		ioa_cfg->cfg_locked = 0;
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
8628 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8629 * @ipr_cmd: ipr command struct
8631 * Description: This clears PCI reset to the adapter and delays two seconds.
8636 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8639 ipr_cmd->job_step = ipr_reset_bist_done;
8640 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8642 return IPR_RC_JOB_RETURN;
/**
 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
 * @work: work struct
 *
 * Description: This pulses warm reset to a slot.
 *
 * Return value:
 *	nothing
 **/
static void ipr_reset_reset_work(struct work_struct *work)
	struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;
	unsigned long lock_flags = 0;

	/* Assert warm reset, hold it for the required interval, release.
	 * The msleep is why this runs from a workqueue, not the job. */
	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Only resume the job if this command still owns the reset */
	if (ioa_cfg->reset_cmd == ipr_cmd)
		ipr_reset_ioa_job(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8672 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8673 * @ipr_cmd: ipr command struct
8675 * Description: This asserts PCI reset to the adapter.
8680 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8682 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8685 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8686 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8687 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8689 return IPR_RC_JOB_RETURN;
/**
 * ipr_reset_block_config_access_wait - Wait for permission to block config access
 * @ipr_cmd: ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;

	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
		ioa_cfg->cfg_locked = 1;
		/* Lock acquired: proceed to the chip-specific reset step */
		ipr_cmd->job_step = ioa_cfg->reset;
		if (ipr_cmd->u.time_left) {
			/* Lock busy: retry after one poll interval */
			rc = IPR_RC_JOB_RETURN;
			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
			ipr_reset_start_timer(ipr_cmd,
					      IPR_CHECK_FOR_RESET_TIMEOUT);
			/* Out of patience: reset without the config lock */
			ipr_cmd->job_step = ioa_cfg->reset;
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting to lock config access. Resetting anyway.\n");
8726 * ipr_reset_block_config_access - Block config access to the IOA
8727 * @ipr_cmd: ipr command struct
8729 * Description: This attempts to block config access to the IOA
8732 * IPR_RC_JOB_CONTINUE
8734 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8736 ipr_cmd->ioa_cfg->cfg_locked = 0;
8737 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8738 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8739 return IPR_RC_JOB_CONTINUE;
8743 * ipr_reset_allowed - Query whether or not IOA can be reset
8744 * @ioa_cfg: ioa config struct
8747 * 0 if reset not allowed / non-zero if reset is allowed
8749 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8751 volatile u32 temp_reg;
8753 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8754 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8758 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8759 * @ipr_cmd: ipr command struct
8761 * Description: This function waits for adapter permission to run BIST,
8762 * then runs BIST. If the adapter does not give permission after a
8763 * reasonable time, we will reset the adapter anyway. The impact of
8764 * resetting the adapter without warning the adapter is the risk of
8765 * losing the persistent error log on the adapter. If the adapter is
8766 * reset while it is writing to the flash on the adapter, the flash
8767 * segment will have bad ECC and be zeroed.
8770 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8772 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8774 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8775 int rc = IPR_RC_JOB_RETURN;
8777 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8778 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8779 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8781 ipr_cmd->job_step = ipr_reset_block_config_access;
8782 rc = IPR_RC_JOB_CONTINUE;
/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd: ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		/* MMIO works: warn the adapter, then wait for permission */
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
		/* No memory access: skip the alert, go straight to reset */
		ipr_cmd->job_step = ipr_reset_block_config_access;

	/* Always arm the timer so BIST never runs from interrupt context */
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	return IPR_RC_JOB_RETURN;
8825 * ipr_reset_quiesce_done - Complete IOA disconnect
8826 * @ipr_cmd: ipr command struct
8828 * Description: Freeze the adapter to complete quiesce processing
8831 * IPR_RC_JOB_CONTINUE
8833 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8835 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8838 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8839 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8841 return IPR_RC_JOB_CONTINUE;
/**
 * ipr_reset_cancel_hcam_done - Check for outstanding commands
 * @ipr_cmd: ipr command struct
 *
 * Description: Ensure nothing is outstanding to the IOA and
 * proceed with IOA disconnect. Otherwise reset the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmnd *loop_cmd;
	struct ipr_hrr_queue *hrrq;
	int rc = IPR_RC_JOB_CONTINUE;

	ipr_cmd->job_step = ipr_reset_quiesce_done;

	/* If anything is still pending on any HRRQ, fall back to a full
	 * adapter reset instead of completing the quiesce. */
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
			rc = IPR_RC_JOB_RETURN;
		spin_unlock(&hrrq->_lock);
8885 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8886 * @ipr_cmd: ipr command struct
8888 * Description: Cancel any oustanding HCAMs to the IOA.
8891 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8893 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8895 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8896 int rc = IPR_RC_JOB_CONTINUE;
8897 struct ipr_cmd_pkt *cmd_pkt;
8898 struct ipr_cmnd *hcam_cmd;
8899 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8902 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8904 if (!hrrq->ioa_is_dead) {
8905 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8906 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8907 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8910 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8911 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8912 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8913 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8914 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8915 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
8916 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8917 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8918 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8919 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8920 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8921 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8922 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8923 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8925 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8926 IPR_CANCEL_TIMEOUT);
8928 rc = IPR_RC_JOB_RETURN;
8929 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8934 ipr_cmd->job_step = ipr_reset_alert;
8941 * ipr_reset_ucode_download_done - Microcode download completion
8942 * @ipr_cmd: ipr command struct
8944 * Description: This function unmaps the microcode download buffer.
8947 * IPR_RC_JOB_CONTINUE
8949 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8951 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8952 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8954 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8955 sglist->num_sg, DMA_TO_DEVICE);
8957 ipr_cmd->job_step = ipr_reset_alert;
8958 return IPR_RC_JOB_CONTINUE;
/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd: ipr command struct
 *
 * Description: This function checks to see if it there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	/* No microcode staged: go straight to the reset alert step */
	ipr_cmd->job_step = ipr_reset_alert;

		return IPR_RC_JOB_CONTINUE;

	/* Build a SCSI WRITE BUFFER (download and save) aimed at the IOA */
	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	/* 24-bit big-endian buffer length lives in cdb[6..8] */
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	/* Attach the microcode scatter/gather list (64- or 32-bit IOADL) */
	ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	return IPR_RC_JOB_RETURN;
/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd: ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
		/* Quiesce: cancel HCAMs instead of issuing a shutdown */
		ipr_cmd->job_step = ipr_reset_cancel_hcam;
	else if (shutdown_type != IPR_SHUTDOWN_NONE &&
		 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		/* Issue an IOA shutdown command of the requested type */
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		/* Timeout scales with how thorough the shutdown type is */
		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
		ipr_cmd->job_step = ipr_reset_alert;
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd: ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioa_cfg->reset_cmd != ipr_cmd) {
		/*
		 * We are doing nested adapter resets and this is
		 * not the current reset job.
		 */
		list_add_tail(&ipr_cmd->queue,
			      &ipr_cmd->hrrq->hrrq_free_q);

	if (IPR_IOASC_SENSE_KEY(ioasc)) {
		/* Previous step failed: let its failure handler decide */
		rc = ipr_cmd->job_step_failed(ipr_cmd);
		if (rc == IPR_RC_JOB_RETURN)

	/* Run job steps back-to-back until one needs to wait */
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
	rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg: ioa config struct
 * @job_step: first job step of reset job
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
	struct ipr_cmnd *ipr_cmd;

	ioa_cfg->in_reset_reload = 1;
	/* Stop accepting new commands on every HRRQ for the duration */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);

	/* Block the SCSI midlayer too, unless the adapter is being removed */
	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
		scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	/* Kick off the job state machine at the requested step */
	ipr_reset_ioa_job(ipr_cmd);
/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg: ioa config struct
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)

	if (ioa_cfg->in_reset_reload) {
		/* A reset is already running; demote any dump in progress */
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		/* Too many consecutive reset attempts: give up on the IOA */
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 1;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);

		if (ioa_cfg->in_ioa_bringdown) {
			/* Dead during bringdown: complete the teardown here */
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
				/* Drop the lock while waking the midlayer */
				spin_unlock_irq(ioa_cfg->host->host_lock);
				scsi_unblock_requests(ioa_cfg->host);
				spin_lock_irq(ioa_cfg->host->host_lock);
			/* Dead outside bringdown: convert it to a bringdown */
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9191 * ipr_reset_freeze - Hold off all I/O activity
9192 * @ipr_cmd: ipr command struct
9194 * Description: If the PCI slot is frozen, hold off all I/O
9195 * activity; then, as soon as the slot is available again,
9196 * initiate an adapter reset.
9198 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9200 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9203 /* Disallow new interrupts, avoid loop */
9204 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9205 spin_lock(&ioa_cfg->hrrq[i]._lock);
9206 ioa_cfg->hrrq[i].allow_interrupts = 0;
9207 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9210 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9211 ipr_cmd->done = ipr_reset_ioa_job;
9212 return IPR_RC_JOB_RETURN;
9216 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9217 * @pdev: PCI device struct
9219 * Description: This routine is called to tell us that the MMIO
9220 * access to the IOA has been restored
9222 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9224 unsigned long flags = 0;
9225 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9227 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9228 if (!ioa_cfg->probe_done)
9229 pci_save_state(pdev);
9230 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9231 return PCI_ERS_RESULT_NEED_RESET;
9235 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9236 * @pdev: PCI device struct
9238 * Description: This routine is called to tell us that the PCI bus
9239 * is down. Can't do anything here, except put the device driver
9240 * into a holding pattern, waiting for the PCI bus to come back.
9242 static void ipr_pci_frozen(struct pci_dev *pdev)
9244 unsigned long flags = 0;
9245 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9247 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9248 if (ioa_cfg->probe_done)
9249 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9250 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 *
 * Return value:
 *	PCI_ERS_RESULT_RECOVERED
 **/
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->needs_warm_reset)
			/* Chip needs a full warm reset cycle */
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			/* Otherwise resume from the config-space restore step */
			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
	wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev: PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 *
 * Return value:
 *	none
 **/
static void ipr_pci_perm_failure(struct pci_dev *pdev)
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
		/* Allow exactly one more reset attempt: the final bringdown */
		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
		ioa_cfg->in_ioa_bringdown = 1;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].allow_cmds = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);

		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9311 * ipr_pci_error_detected - Called when a PCI error is detected.
9312 * @pdev: PCI device struct
9313 * @state: PCI channel state
9315 * Description: Called when a PCI error is detected.
9318 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9320 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9321 pci_channel_state_t state)
9324 case pci_channel_io_frozen:
9325 ipr_pci_frozen(pdev);
9326 return PCI_ERS_RESULT_CAN_RECOVER;
9327 case pci_channel_io_perm_failure:
9328 ipr_pci_perm_failure(pdev);
9329 return PCI_ERS_RESULT_DISCONNECT;
9334 return PCI_ERS_RESULT_NEED_RESET;
/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg: ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
	unsigned long host_lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	ioa_cfg->probe_done = 1;
	if (ioa_cfg->needs_hard_reset) {
		/* Hard reset first if probing found the adapter wedged */
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		/* Otherwise start the reset job at the enable-IOA step */
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9370 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9371 * @ioa_cfg: ioa config struct
9376 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9380 if (ioa_cfg->ipr_cmnd_list) {
9381 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9382 if (ioa_cfg->ipr_cmnd_list[i])
9383 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9384 ioa_cfg->ipr_cmnd_list[i],
9385 ioa_cfg->ipr_cmnd_list_dma[i]);
9387 ioa_cfg->ipr_cmnd_list[i] = NULL;
9391 if (ioa_cfg->ipr_cmd_pool)
9392 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9394 kfree(ioa_cfg->ipr_cmnd_list);
9395 kfree(ioa_cfg->ipr_cmnd_list_dma);
9396 ioa_cfg->ipr_cmnd_list = NULL;
9397 ioa_cfg->ipr_cmnd_list_dma = NULL;
9398 ioa_cfg->ipr_cmd_pool = NULL;
/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg: ioa cfg struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
	kfree(ioa_cfg->res_entries);
	dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	/* One host RRQ DMA ring per HRR queue */
	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);

	dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);

	/* Host RCBs used for HCAMs and error logging */
	for (i = 0; i < IPR_MAX_HCAMS; i++) {
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
/**
 * ipr_free_irqs - Free all allocated IRQs for the adapter.
 * @ioa_cfg: ipr cfg struct
 *
 * This function frees all allocated IRQs for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		/* MSI/MSI-X: one IRQ was requested per vector */
		for (i = 0; i < ioa_cfg->nvectors; i++)
			free_irq(ioa_cfg->vectors_info[i].vec,
		/* Legacy INTx: single shared IRQ tied to the first HRRQ */
		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);

	/* Disable the message-based interrupt mode and clear the flag */
	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
		pci_disable_msi(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
		pci_disable_msix(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9470 * ipr_free_all_resources - Free all allocated resources for an adapter.
10471 * @ioa_cfg: ioa config struct
9473 * This function frees all allocated resources for the
9474 * specified adapter.
/* Tear down everything ipr_probe_ioa() set up, in reverse order of
 * acquisition: IRQs, reset workqueue, MMIO mapping, PCI regions, bulk
 * memory, the scsi_host reference, and finally the PCI device itself. */
9479 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9481 struct pci_dev *pdev = ioa_cfg->pdev;
9484 ipr_free_irqs(ioa_cfg);
/* reset_work_q exists only for adapters using PCI warm reset. */
9485 if (ioa_cfg->reset_work_q)
9486 destroy_workqueue(ioa_cfg->reset_work_q);
9487 iounmap(ioa_cfg->hdw_dma_regs);
9488 pci_release_regions(pdev);
9489 ipr_free_mem(ioa_cfg);
/* Drops the reference taken by scsi_host_alloc(); ioa_cfg lives in
 * host->hostdata, so it must not be touched after this point. */
9490 scsi_host_put(ioa_cfg->host);
9491 pci_disable_device(pdev);
9496 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9497 * @ioa_cfg: ioa config struct
9500 * 0 on success / -ENOMEM on allocation failure
/* Allocate and initialize all command blocks for the adapter.
 *
 * Creates a DMA pool of ipr_cmnd blocks (512-byte aligned), two lookup
 * arrays (virtual and DMA addresses), partitions the command-id space
 * across the HRRQs, then allocates each block and pre-fills the parts of
 * its IOARCB that never change (DMA addresses of the IOADL, IOASA, and
 * sense buffer).  Returns 0 on success or -ENOMEM, freeing any partial
 * allocation via ipr_free_cmd_blks() on failure. */
9502 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9504 struct ipr_cmnd *ipr_cmd;
9505 struct ipr_ioarcb *ioarcb;
9506 dma_addr_t dma_addr;
9507 int i, entries_each_hrrq, hrrq_id = 0;
9509 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9510 sizeof(struct ipr_cmnd), 512, 0);
9512 if (!ioa_cfg->ipr_cmd_pool)
9515 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9516 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9518 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9519 ipr_free_cmd_blks(ioa_cfg);
/* Partition the command-id space: with multiple HRRQs, queue 0 is
 * reserved for internal (driver-initiated) commands and the remaining
 * base command blocks are split evenly across the other queues. */
9523 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9524 if (ioa_cfg->hrrq_num > 1) {
9526 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9527 ioa_cfg->hrrq[i].min_cmd_id = 0;
9528 ioa_cfg->hrrq[i].max_cmd_id =
9529 (entries_each_hrrq - 1);
9532 IPR_NUM_BASE_CMD_BLKS/
9533 (ioa_cfg->hrrq_num - 1);
9534 ioa_cfg->hrrq[i].min_cmd_id =
9535 IPR_NUM_INTERNAL_CMD_BLKS +
9536 (i - 1) * entries_each_hrrq;
9537 ioa_cfg->hrrq[i].max_cmd_id =
9538 (IPR_NUM_INTERNAL_CMD_BLKS +
9539 i * entries_each_hrrq - 1);
/* Single-HRRQ case: one queue owns the whole id space. */
9542 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9543 ioa_cfg->hrrq[i].min_cmd_id = 0;
9544 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9546 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9549 BUG_ON(ioa_cfg->hrrq_num == 0);
/* Give any ids left over by the integer division to the last HRRQ. */
9551 i = IPR_NUM_CMD_BLKS -
9552 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9554 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9555 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9558 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9559 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9562 ipr_free_cmd_blks(ioa_cfg);
9566 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9567 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9568 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9570 ioarcb = &ipr_cmd->ioarcb;
9571 ipr_cmd->dma_addr = dma_addr;
/* SIS64 adapters take a 64-bit IOARCB address, SIS32 a 32-bit one. */
9573 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9575 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
/* Response handle encodes the command index; low 2 bits are reserved. */
9577 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9578 if (ioa_cfg->sis64) {
9579 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9580 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9581 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9582 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9584 ioarcb->write_ioadl_addr =
9585 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9586 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9587 ioarcb->ioasa_host_pci_addr =
9588 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9590 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9591 ipr_cmd->cmd_index = i;
9592 ipr_cmd->ioa_cfg = ioa_cfg;
9593 ipr_cmd->sense_buffer_dma = dma_addr +
9594 offsetof(struct ipr_cmnd, sense_buffer);
/* Attach the block to its owning HRRQ's free list; advance hrrq_id
 * once this queue's id range is exhausted. */
9596 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9597 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9598 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9599 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9607 * ipr_alloc_mem - Allocate memory for an adapter
9608 * @ioa_cfg: ioa config struct
9611 * 0 on success / non-zero for error
/* Allocate all bulk memory for the adapter: resource table entries, VPD
 * control blocks, command blocks, per-HRRQ host response rings, the
 * config table, HCAM buffers, and the trace buffer.  Uses the classic
 * goto-ladder so a failure at any stage unwinds everything allocated so
 * far.  Returns 0 on success / -ENOMEM on any allocation failure. */
9613 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9615 struct pci_dev *pdev = ioa_cfg->pdev;
9616 int i, rc = -ENOMEM;
9619 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9620 ioa_cfg->max_devs_supported, GFP_KERNEL);
9622 if (!ioa_cfg->res_entries)
/* All resource entries start on the free queue. */
9625 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9626 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9627 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9630 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9631 sizeof(struct ipr_misc_cbs),
9632 &ioa_cfg->vpd_cbs_dma,
9635 if (!ioa_cfg->vpd_cbs)
9636 goto out_free_res_entries;
9638 if (ipr_alloc_cmd_blks(ioa_cfg))
9639 goto out_free_vpd_cbs;
9641 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9642 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9643 sizeof(u32) * ioa_cfg->hrrq[i].size,
9644 &ioa_cfg->hrrq[i].host_rrq_dma,
9647 if (!ioa_cfg->hrrq[i].host_rrq) {
/* Free the rings allocated in earlier iterations before unwinding. */
9649 dma_free_coherent(&pdev->dev,
9650 sizeof(u32) * ioa_cfg->hrrq[i].size,
9651 ioa_cfg->hrrq[i].host_rrq,
9652 ioa_cfg->hrrq[i].host_rrq_dma);
9653 goto out_ipr_free_cmd_blocks;
9655 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9658 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9659 ioa_cfg->cfg_table_size,
9660 &ioa_cfg->cfg_table_dma,
9663 if (!ioa_cfg->u.cfg_table)
9664 goto out_free_host_rrq;
9666 for (i = 0; i < IPR_MAX_HCAMS; i++) {
9667 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9668 sizeof(struct ipr_hostrcb),
9669 &ioa_cfg->hostrcb_dma[i],
9672 if (!ioa_cfg->hostrcb[i])
9673 goto out_free_hostrcb_dma;
/* The IOA is handed the DMA address of the hcam member, not of the
 * containing struct. */
9675 ioa_cfg->hostrcb[i]->hostrcb_dma =
9676 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9677 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9678 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9681 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9682 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9684 if (!ioa_cfg->trace)
9685 goto out_free_hostrcb_dma;
/* Error unwind: each label frees what was allocated before the jump;
 * on entry to out_free_hostrcb_dma, i is the count of HCAMs allocated. */
9692 out_free_hostrcb_dma:
9694 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9695 ioa_cfg->hostrcb[i],
9696 ioa_cfg->hostrcb_dma[i]);
9698 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9699 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9701 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9702 dma_free_coherent(&pdev->dev,
9703 sizeof(u32) * ioa_cfg->hrrq[i].size,
9704 ioa_cfg->hrrq[i].host_rrq,
9705 ioa_cfg->hrrq[i].host_rrq_dma);
9707 out_ipr_free_cmd_blocks:
9708 ipr_free_cmd_blks(ioa_cfg);
9710 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9711 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9712 out_free_res_entries:
9713 kfree(ioa_cfg->res_entries);
9718 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9719 * @ioa_cfg: ioa config struct
9724 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9728 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9729 ioa_cfg->bus_attr[i].bus = i;
9730 ioa_cfg->bus_attr[i].qas_enabled = 0;
9731 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9732 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9733 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9735 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9740 * ipr_init_regs - Initialize IOA registers
9741 * @ioa_cfg: ioa config struct
/* Compute the ioremapped virtual address of every IOA register by adding
 * the chip-specific register offsets (chip_cfg->regs) to the mapped MMIO
 * base.  The *_reg32 variants address the low 32 bits of 64-bit
 * registers; the SIS64-only registers are filled in at the end. */
9746 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9748 const struct ipr_interrupt_offsets *p;
9749 struct ipr_interrupts *t;
9752 p = &ioa_cfg->chip_cfg->regs;
9754 base = ioa_cfg->hdw_dma_regs;
9756 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9757 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9758 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9759 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9760 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9761 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9762 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9763 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9764 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9765 t->ioarrin_reg = base + p->ioarrin_reg;
9766 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9767 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9768 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9769 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9770 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9771 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
/* These registers only exist on SIS64 hardware. */
9773 if (ioa_cfg->sis64) {
9774 t->init_feedback_reg = base + p->init_feedback_reg;
9775 t->dump_addr_reg = base + p->dump_addr_reg;
9776 t->dump_data_reg = base + p->dump_data_reg;
9777 t->endian_swap_reg = base + p->endian_swap_reg;
9782 * ipr_init_ioa_cfg - Initialize IOA config struct
9783 * @ioa_cfg: ioa config struct
9784 * @host: scsi host struct
9785 * @pdev: PCI dev struct
/* One-time initialization of the ioa_cfg software state: back-pointers,
 * debug eye-catchers, list heads, work items and wait queues, default bus
 * attributes, SIS32/SIS64 size limits, and per-HRRQ locks.  No hardware
 * access happens here. */
9790 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9791 struct Scsi_Host *host, struct pci_dev *pdev)
9795 ioa_cfg->host = host;
9796 ioa_cfg->pdev = pdev;
9797 ioa_cfg->log_level = ipr_log_level;
9798 ioa_cfg->doorbell = IPR_DOORBELL;
/* Eye-catcher strings make the structures easy to find in a dump. */
9799 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9800 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9801 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9802 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9803 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9804 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9806 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9807 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9808 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9809 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9810 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9811 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9812 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9813 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9814 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9815 ioa_cfg->sdt_state = INACTIVE;
9817 ipr_initialize_bus_attr(ioa_cfg);
9818 ioa_cfg->max_devs_supported = ipr_max_devs;
/* SIS64 and SIS32 adapters have different addressing limits and config
 * table entry formats, so the host limits and table size differ. */
9820 if (ioa_cfg->sis64) {
9821 host->max_channel = IPR_MAX_SIS64_BUSES;
9822 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9823 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9824 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9825 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9826 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9827 + ((sizeof(struct ipr_config_table_entry64)
9828 * ioa_cfg->max_devs_supported)));
9830 host->max_channel = IPR_VSET_BUS;
9831 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9832 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9833 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9834 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9835 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9836 + ((sizeof(struct ipr_config_table_entry)
9837 * ioa_cfg->max_devs_supported)));
9840 host->unique_id = host->host_no;
9841 host->max_cmd_len = IPR_MAX_CDB_LEN;
9842 host->can_queue = ioa_cfg->max_cmds;
9843 pci_set_drvdata(pdev, ioa_cfg);
/* Each HRRQ gets its own free/pending lists; which lock protects a
 * queue (its own _lock vs. the shared host_lock) depends on a condition
 * elided here — presumably single-HRRQ adapters share the host lock. */
9845 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9846 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9847 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9848 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9850 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9852 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9857 * ipr_get_chip_info - Find adapter chip information
9858 * @dev_id: PCI device id struct
9861 * ptr to chip information on success / NULL on failure
9863 static const struct ipr_chip_t *
9864 ipr_get_chip_info(const struct pci_device_id *dev_id)
9868 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9869 if (ipr_chip[i].vendor == dev_id->vendor &&
9870 ipr_chip[i].device == dev_id->device)
9871 return &ipr_chip[i];
9876 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9878 * @ioa_cfg: ioa config struct
9883 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9885 struct pci_dev *pdev = ioa_cfg->pdev;
9887 if (pci_channel_offline(pdev)) {
9888 wait_event_timeout(ioa_cfg->eeh_wait_q,
9889 !pci_channel_offline(pdev),
9890 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9891 pci_restore_state(pdev);
/* Try to enable between 1 and ipr_number_of_msix MSI-X vectors and record
 * the allocated vector numbers in vectors_info.  On failure (negative
 * return from pci_enable_msix_range, path elided here) waits out any PCI
 * error recovery before returning the error. */
9895 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9897 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9900 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9901 entries[i].entry = i;
9903 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9904 entries, 1, ipr_number_of_msix);
9906 ipr_wait_for_pci_err_recovery(ioa_cfg);
9910 for (i = 0; i < vectors; i++)
9911 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9912 ioa_cfg->nvectors = vectors;
/* Try to enable between 1 and ipr_number_of_msix MSI vectors.  MSI
 * vectors are consecutive starting at pdev->irq, so vectors_info is
 * filled by simple offset.  Error path (negative return) is elided. */
9917 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9921 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9923 ipr_wait_for_pci_err_recovery(ioa_cfg);
9927 for (i = 0; i < vectors; i++)
9928 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9929 ioa_cfg->nvectors = vectors;
/* Build a "host<N>-<vec>" description string for each enabled vector;
 * these are later passed to request_irq() so /proc/interrupts shows
 * which queue each vector serves.  n reserves one byte for the NUL. */
9934 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9936 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9938 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9939 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9940 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
/* Defensive re-termination; snprintf already NUL-terminates. */
9941 ioa_cfg->vectors_info[vec_idx].
9942 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
/* Request handlers for every vector beyond the first (vector 0 is
 * requested separately by the caller), one per HRRQ.  On failure the
 * already-requested vectors are freed again (unwind loop elided here)
 * before returning the error from request_irq(). */
9946 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9950 for (i = 1; i < ioa_cfg->nvectors; i++) {
9951 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9954 ioa_cfg->vectors_info[i].desc,
9958 free_irq(ioa_cfg->vectors_info[i].vec,
9967 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9968 * @pdev: PCI device struct
9970 * Description: Simply set the msi_received flag to 1 indicating that
9971 * Message Signaled Interrupts are supported.
9974 * IRQ_HANDLED
/* Interrupt handler installed only for the MSI test in ipr_test_msi():
 * records that the test interrupt arrived and wakes the waiter.  Always
 * returns IRQ_HANDLED. */
9976 static irqreturn_t ipr_test_intr(int irq, void *devp)
9978 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9979 unsigned long lock_flags = 0;
9980 irqreturn_t rc = IRQ_HANDLED;
9982 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9983 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* msi_received is read under the same host_lock by ipr_test_msi(). */
9985 ioa_cfg->msi_received = 1;
9986 wake_up(&ioa_cfg->msi_wait_q);
9988 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9993 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9994 * @pdev: PCI device struct
9996 * Description: The return value from pci_enable_msi_range() can not always be
9997 * trusted. This routine sets up and initiates a test interrupt to determine
9998 * if the interrupt is received via the ipr_test_intr() service routine.
9999 * If the tests fails, the driver will fall back to LSI.
10002 * 0 on success / non-zero on failure
/* Verify that MSI/MSI-X delivery actually works: install ipr_test_intr
 * on vector 0, poke the IO debug-acknowledge bit to make the adapter
 * raise an interrupt, and wait up to 1s for the handler to set
 * msi_received.  pci_enable_msi_range() success alone cannot be trusted
 * on all platforms.  Returns 0 on success, an error to make the caller
 * fall back to LSI otherwise. */
10004 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10007 volatile u32 int_reg;
10008 unsigned long lock_flags = 0;
10012 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10013 init_waitqueue_head(&ioa_cfg->msi_wait_q);
10014 ioa_cfg->msi_received = 0;
/* Mask everything except transition-to-operational, then unmask only
 * the debug-acknowledge interrupt used for the test. */
10015 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10016 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
/* Read back to flush the posted write before dropping the lock. */
10017 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10018 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10020 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10021 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10023 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10025 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
10027 } else if (ipr_debug)
10028 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
/* Trigger the test interrupt and wait up to one second for it. */
10030 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10031 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10032 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10033 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10034 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10036 if (!ioa_cfg->msi_received) {
10037 /* MSI test failed */
10038 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
10040 } else if (ipr_debug)
10041 dev_info(&pdev->dev, "MSI test succeeded.\n");
10043 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Remove the temporary test handler; the real handlers are requested
 * later by the caller. */
10045 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10046 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
10048 free_irq(pdev->irq, ioa_cfg);
10055 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
10056 * @pdev: PCI device struct
10057 * @dev_id: PCI device id struct
10060 * 0 on success / non-zero on failure
/* First-stage probe: allocate the Scsi_Host and ioa_cfg, identify the
 * chip, claim and map PCI resources, set DMA masks, pick an interrupt
 * mode (MSI-X > MSI > LSI, with a delivery test for MSI/MSI-X),
 * allocate all adapter memory, request the IRQ handlers, and add the
 * adapter to the global ipr_ioa_head list.  Second-stage init (actually
 * bringing the IOA operational) happens later in ipr_probe().
 * Returns 0 on success / non-zero on failure, with full unwind via the
 * goto ladder at the bottom. */
10062 static int ipr_probe_ioa(struct pci_dev *pdev,
10063 const struct pci_device_id *dev_id)
10065 struct ipr_ioa_cfg *ioa_cfg;
10066 struct Scsi_Host *host;
10067 unsigned long ipr_regs_pci;
10068 void __iomem *ipr_regs;
10069 int rc = PCIBIOS_SUCCESSFUL;
10070 volatile u32 mask, uproc, interrupts;
10071 unsigned long lock_flags, driver_lock_flags;
10075 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* ioa_cfg is embedded in the Scsi_Host's hostdata allocation. */
10076 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10079 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10084 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10085 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10086 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10088 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10090 if (!ioa_cfg->ipr_chip) {
10091 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10092 dev_id->vendor, dev_id->device);
10093 goto out_scsi_host_put;
10096 /* set SIS 32 or SIS 64 */
10097 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10098 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10099 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10100 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
/* Module parameter overrides the per-device default timeout. */
10102 if (ipr_transop_timeout)
10103 ioa_cfg->transop_timeout = ipr_transop_timeout;
10104 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10105 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10107 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10109 ioa_cfg->revid = pdev->revision;
10111 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10113 ipr_regs_pci = pci_resource_start(pdev, 0);
10115 rc = pci_request_regions(pdev, IPR_NAME);
10117 dev_err(&pdev->dev,
10118 "Couldn't register memory range of registers\n");
10119 goto out_scsi_host_put;
10122 rc = pci_enable_device(pdev);
/* If EEH recovery is in flight, wait for it and retry the enable once. */
10124 if (rc || pci_channel_offline(pdev)) {
10125 if (pci_channel_offline(pdev)) {
10126 ipr_wait_for_pci_err_recovery(ioa_cfg);
10127 rc = pci_enable_device(pdev);
10131 dev_err(&pdev->dev, "Cannot enable adapter\n");
10132 ipr_wait_for_pci_err_recovery(ioa_cfg);
10133 goto out_release_regions;
10137 ipr_regs = pci_ioremap_bar(pdev, 0);
10140 dev_err(&pdev->dev,
10141 "Couldn't map memory range of registers\n");
10146 ioa_cfg->hdw_dma_regs = ipr_regs;
10147 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10148 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10150 ipr_init_regs(ioa_cfg);
/* SIS64 prefers a 64-bit DMA mask but can fall back to 32-bit. */
10152 if (ioa_cfg->sis64) {
10153 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10155 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10156 rc = dma_set_mask_and_coherent(&pdev->dev,
10160 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10163 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10164 goto cleanup_nomem;
10167 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10168 ioa_cfg->chip_cfg->cache_line_size);
10170 if (rc != PCIBIOS_SUCCESSFUL) {
10171 dev_err(&pdev->dev, "Write of cache line size failed\n");
10172 ipr_wait_for_pci_err_recovery(ioa_cfg);
10174 goto cleanup_nomem;
10177 /* Issue MMIO read to ensure card is not in EEH */
10178 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10179 ipr_wait_for_pci_err_recovery(ioa_cfg);
/* Clamp the user-requested MSI-X vector count to the hardware max. */
10181 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10182 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10183 IPR_MAX_MSIX_VECTORS);
10184 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
/* Interrupt mode selection: MSI-X first, then MSI, else legacy LSI. */
10187 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
10188 ipr_enable_msix(ioa_cfg) == 0)
10189 ioa_cfg->intr_flag = IPR_USE_MSIX;
10190 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
10191 ipr_enable_msi(ioa_cfg) == 0)
10192 ioa_cfg->intr_flag = IPR_USE_MSI;
10194 ioa_cfg->intr_flag = IPR_USE_LSI;
10195 ioa_cfg->clear_isr = 1;
10196 ioa_cfg->nvectors = 1;
10197 dev_info(&pdev->dev, "Cannot enable MSI.\n");
10200 pci_set_master(pdev);
/* pci_set_master may have been lost to an EEH event; retry once. */
10202 if (pci_channel_offline(pdev)) {
10203 ipr_wait_for_pci_err_recovery(ioa_cfg);
10204 pci_set_master(pdev);
10205 if (pci_channel_offline(pdev)) {
10207 goto out_msi_disable;
/* MSI/MSI-X delivery must be proven by an actual test interrupt;
 * -EOPNOTSUPP means "works but unsupported" -> fall back to LSI. */
10211 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
10212 ioa_cfg->intr_flag == IPR_USE_MSIX) {
10213 rc = ipr_test_msi(ioa_cfg, pdev);
10214 if (rc == -EOPNOTSUPP) {
10215 ipr_wait_for_pci_err_recovery(ioa_cfg);
10216 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
10217 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
10218 pci_disable_msi(pdev);
10219 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
10220 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
10221 pci_disable_msix(pdev);
10224 ioa_cfg->intr_flag = IPR_USE_LSI;
10225 ioa_cfg->nvectors = 1;
10228 goto out_msi_disable;
10230 if (ioa_cfg->intr_flag == IPR_USE_MSI)
10231 dev_info(&pdev->dev,
10232 "Request for %d MSIs succeeded with starting IRQ: %d\n",
10233 ioa_cfg->nvectors, pdev->irq);
10234 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10235 dev_info(&pdev->dev,
10236 "Request for %d MSIXs succeeded.",
10237 ioa_cfg->nvectors);
/* Use no more HRRQs than vectors, CPUs, or the driver maximum. */
10241 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10242 (unsigned int)num_online_cpus(),
10243 (unsigned int)IPR_MAX_HRRQ_NUM);
10245 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10246 goto out_msi_disable;
10248 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10249 goto out_msi_disable;
10251 rc = ipr_alloc_mem(ioa_cfg);
10253 dev_err(&pdev->dev,
10254 "Couldn't allocate enough memory for device driver!\n");
10255 goto out_msi_disable;
10258 /* Save away PCI config space for use following IOA reset */
10259 rc = pci_save_state(pdev);
10261 if (rc != PCIBIOS_SUCCESSFUL) {
10262 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10264 goto cleanup_nolog;
10268 * If HRRQ updated interrupt is not masked, or reset alert is set,
10269 * the card is in an unknown state and needs a hard reset
10271 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10272 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10273 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10274 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10275 ioa_cfg->needs_hard_reset = 1;
10276 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10277 ioa_cfg->needs_hard_reset = 1;
10278 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10279 ioa_cfg->ioa_unit_checked = 1;
10281 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10282 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10283 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Register the real interrupt handlers: one per vector for MSI/MSI-X
 * (vector 0 here, the rest via ipr_request_other_msi_irqs), or a single
 * shared handler on pdev->irq for LSI. */
10285 if (ioa_cfg->intr_flag == IPR_USE_MSI
10286 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
10287 name_msi_vectors(ioa_cfg);
10288 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
10290 ioa_cfg->vectors_info[0].desc,
10291 &ioa_cfg->hrrq[0]);
10293 rc = ipr_request_other_msi_irqs(ioa_cfg);
10295 rc = request_irq(pdev->irq, ipr_isr,
10297 IPR_NAME, &ioa_cfg->hrrq[0]);
10300 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10302 goto cleanup_nolog;
/* Some adapters (and revid 0 Obsidian-E) need PCI warm reset instead of
 * BIST, which requires a dedicated ordered workqueue. */
10305 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10306 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10307 ioa_cfg->needs_warm_reset = 1;
10308 ioa_cfg->reset = ipr_reset_slot_reset;
10310 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10311 WQ_MEM_RECLAIM, host->host_no);
10313 if (!ioa_cfg->reset_work_q) {
10314 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10319 ioa_cfg->reset = ipr_reset_start_bist;
10321 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10322 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10323 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
/* Error unwind ladder (labels partially elided in this excerpt). */
10330 ipr_free_irqs(ioa_cfg);
10332 ipr_free_mem(ioa_cfg);
10334 ipr_wait_for_pci_err_recovery(ioa_cfg);
10335 if (ioa_cfg->intr_flag == IPR_USE_MSI)
10336 pci_disable_msi(pdev);
10337 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10338 pci_disable_msix(pdev);
10342 pci_disable_device(pdev);
10343 out_release_regions:
10344 pci_release_regions(pdev);
10346 scsi_host_put(host);
10351 * ipr_initiate_ioa_bringdown - Bring down an adapter
10352 * @ioa_cfg: ioa config struct
10353 * @shutdown_type: shutdown type
10355 * Description: This function will initiate bringing down the adapter.
10356 * This consists of issuing an IOA shutdown to the adapter
10357 * to flush the cache, and running BIST.
10358 * If the caller needs to wait on the completion of the reset,
10359 * the caller must sleep on the reset_wait_q.
/* Kick off an orderly adapter bringdown: abort any pending dump wait,
 * mark the bringdown in progress, and start the reset state machine with
 * the requested shutdown type.  Caller must hold host_lock and, if it
 * needs completion, sleep on reset_wait_q afterwards. */
10364 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10365 enum ipr_shutdown_type shutdown_type)
10368 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10369 ioa_cfg->sdt_state = ABORT_DUMP;
/* Bringdown must not be retried; clear the retry counter. */
10370 ioa_cfg->reset_retries = 0;
10371 ioa_cfg->in_ioa_bringdown = 1;
10372 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10377 * __ipr_remove - Remove a single adapter
10378 * @pdev: pci device struct
10380 * Adapter hot plug remove entry point.
/* Core of adapter removal: wait out any in-flight reset, mark every HRRQ
 * as removing (so no new commands are accepted), bring the IOA down with
 * a normal shutdown, flush all outstanding work, unlink the adapter from
 * the global list, and free everything. */
10385 static void __ipr_remove(struct pci_dev *pdev)
10387 unsigned long host_lock_flags = 0;
10388 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10390 unsigned long driver_lock_flags;
10393 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Loop: a reset may be re-triggered while we slept, so re-check. */
10394 while (ioa_cfg->in_reset_reload) {
10395 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10396 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10397 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10400 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10401 spin_lock(&ioa_cfg->hrrq[i]._lock);
10402 ioa_cfg->hrrq[i].removing_ioa = 1;
10403 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10406 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10408 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10409 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10410 flush_work(&ioa_cfg->work_q);
10411 if (ioa_cfg->reset_work_q)
10412 flush_workqueue(ioa_cfg->reset_work_q);
/* Resources were torn down by the bringdown; drop list bookkeeping. */
10413 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10414 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10416 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10417 list_del(&ioa_cfg->queue);
10418 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10420 if (ioa_cfg->sdt_state == ABORT_DUMP)
10421 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10422 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10424 ipr_free_all_resources(ioa_cfg);
10430 * ipr_remove - IOA hot plug remove entry point
10431 * @pdev: pci device struct
10433 * Adapter hot plug remove entry point.
/* PCI hot-plug remove entry point: remove the sysfs trace/dump/async-log
 * files and the scsi host before handing off to __ipr_remove() for the
 * actual teardown. */
10438 static void ipr_remove(struct pci_dev *pdev)
10440 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10444 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10446 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10448 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10449 &ipr_ioa_async_err_log);
10450 scsi_remove_host(ioa_cfg->host);
10452 __ipr_remove(pdev);
10458 * ipr_probe - Adapter hot plug add entry point
10461 * 0 on success / non-zero on failure
/* Hot-plug add entry point: run both probe stages, register the scsi
 * host and its sysfs attribute files, enable device scanning, set up
 * irq_poll for multi-vector SIS64 adapters, and start the scan.  Each
 * failure point unwinds everything registered before it. */
10463 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10465 struct ipr_ioa_cfg *ioa_cfg;
10466 unsigned long flags;
10469 rc = ipr_probe_ioa(pdev, dev_id);
10474 ioa_cfg = pci_get_drvdata(pdev);
/* Stage 2: bring the IOA operational. */
10475 rc = ipr_probe_ioa_part2(ioa_cfg);
10478 __ipr_remove(pdev);
10482 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10485 __ipr_remove(pdev);
10489 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10493 scsi_remove_host(ioa_cfg->host);
10494 __ipr_remove(pdev);
10498 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10499 &ipr_ioa_async_err_log);
10502 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10504 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10506 scsi_remove_host(ioa_cfg->host);
10507 __ipr_remove(pdev);
10511 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10515 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10516 &ipr_ioa_async_err_log);
10517 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10519 scsi_remove_host(ioa_cfg->host);
10520 __ipr_remove(pdev);
/* Let the worker thread start adding/removing devices. */
10523 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10524 ioa_cfg->scan_enabled = 1;
10525 schedule_work(&ioa_cfg->work_q);
10526 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10528 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
/* irq_poll only for multi-HRRQ SIS64; queue 0 stays interrupt-driven. */
10530 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10531 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10532 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10533 ioa_cfg->iopoll_weight, ipr_iopoll);
10537 scsi_scan_host(ioa_cfg->host);
10543 * ipr_shutdown - Shutdown handler.
10544 * @pdev: pci device struct
10546 * This function is invoked upon system shutdown/reboot. It will issue
10547 * an adapter shutdown to the adapter to flush the write cache.
/* System shutdown/reboot hook: disable irq_poll, wait for any in-flight
 * reset, then issue an adapter shutdown so the write cache is flushed.
 * On a fast reboot of a SIS64 adapter a quiesce-only shutdown is used
 * and the IRQs/device are released immediately. */
10552 static void ipr_shutdown(struct pci_dev *pdev)
10554 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10555 unsigned long lock_flags = 0;
10556 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10559 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10560 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10561 ioa_cfg->iopoll_weight = 0;
10562 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10563 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10566 while (ioa_cfg->in_reset_reload) {
10567 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10568 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10569 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10572 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10573 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10575 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10576 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Block until the bringdown completes before the system goes down. */
10577 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10578 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10579 ipr_free_irqs(ioa_cfg);
10580 pci_disable_device(ioa_cfg->pdev);
10584 static struct pci_device_id ipr_pci_table[] = {
10585 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10586 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10587 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10588 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10589 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10590 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10591 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10592 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10593 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10594 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10595 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10596 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10597 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10598 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10599 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10600 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10601 IPR_USE_LONG_TRANSOP_TIMEOUT },
10602 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10603 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10604 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10605 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10606 IPR_USE_LONG_TRANSOP_TIMEOUT },
10607 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10608 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10609 IPR_USE_LONG_TRANSOP_TIMEOUT },
10610 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10611 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10612 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10613 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10614 IPR_USE_LONG_TRANSOP_TIMEOUT},
10615 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10616 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10617 IPR_USE_LONG_TRANSOP_TIMEOUT },
10618 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10619 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10620 IPR_USE_LONG_TRANSOP_TIMEOUT },
10621 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10622 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10623 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10624 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10625 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10626 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10627 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10628 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10629 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10630 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10631 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10632 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10633 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10634 IPR_USE_LONG_TRANSOP_TIMEOUT },
10635 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10636 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10637 IPR_USE_LONG_TRANSOP_TIMEOUT },
10638 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10639 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10640 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10641 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10642 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10643 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10644 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10645 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10646 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10647 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10648 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10649 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10650 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10651 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10652 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10653 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10654 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10655 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10656 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10657 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10658 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10659 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10660 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10661 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10662 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10663 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10664 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10665 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10666 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10667 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10668 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10669 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10670 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10671 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10672 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10673 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10674 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10675 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10676 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10677 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10678 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10679 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10680 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10681 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10682 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10683 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10684 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10685 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10686 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10687 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10688 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10689 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10690 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10691 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10692 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10693 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10696 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10698 static const struct pci_error_handlers ipr_err_handler = {
10699 .error_detected = ipr_pci_error_detected,
10700 .mmio_enabled = ipr_pci_mmio_enabled,
10701 .slot_reset = ipr_pci_slot_reset,
10704 static struct pci_driver ipr_driver = {
10706 .id_table = ipr_pci_table,
10707 .probe = ipr_probe,
10708 .remove = ipr_remove,
10709 .shutdown = ipr_shutdown,
10710 .err_handler = &ipr_err_handler,
10714 * ipr_halt_done - Shutdown prepare completion
10719 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10721 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10725 * ipr_halt - Issue shutdown prepare to all adapters
10728 * NOTIFY_OK on success / NOTIFY_DONE on failure
10730 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10732 struct ipr_cmnd *ipr_cmd;
10733 struct ipr_ioa_cfg *ioa_cfg;
10734 unsigned long flags = 0, driver_lock_flags;
10736 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10737 return NOTIFY_DONE;
10739 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10741 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10742 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10743 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10744 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10745 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10749 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10750 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10751 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10752 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10753 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10755 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10756 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10758 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10763 static struct notifier_block ipr_notifier = {
10768 * ipr_init - Module entry point
10771 * 0 on success / negative value on failure
10773 static int __init ipr_init(void)
10775 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10776 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10778 register_reboot_notifier(&ipr_notifier);
10779 return pci_register_driver(&ipr_driver);
10783 * ipr_exit - Module unload
10785 * Module unload entry point.
10790 static void __exit ipr_exit(void)
10792 unregister_reboot_notifier(&ipr_notifier);
10793 pci_unregister_driver(&ipr_driver);
/* Hook module load/unload into the kernel's module machinery */
module_init(ipr_init);
module_exit(ipr_exit);