/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/*
 * HPSA_DRIVER_VERSION must be three byte values (0-255) separated by '.'
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.20-125"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("cciss");

static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103c, 0x1920},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103c, 0x1925},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{PCI_VENDOR_ID_COMPAQ,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5A_access},
	{0x40800E11, "Smart Array 5i", &SA5B_access},
	{0x40820E11, "Smart Array 532", &SA5B_access},
	{0x40830E11, "Smart Array 5312", &SA5B_access},
	{0x409A0E11, "Smart Array 641", &SA5A_access},
	{0x409B0E11, "Smart Array 642", &SA5A_access},
	{0x409C0E11, "Smart Array 6400", &SA5A_access},
	{0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
	{0x40910E11, "Smart Array 6i", &SA5A_access},
	{0x3225103C, "Smart Array P600", &SA5A_access},
	{0x3223103C, "Smart Array P800", &SA5A_access},
	{0x3234103C, "Smart Array P400", &SA5A_access},
	{0x3235103C, "Smart Array P400i", &SA5A_access},
	{0x3211103C, "Smart Array E200i", &SA5A_access},
	{0x3212103C, "Smart Array E200", &SA5A_access},
	{0x3213103C, "Smart Array E200i", &SA5A_access},
	{0x3214103C, "Smart Array E200i", &SA5A_access},
	{0x3215103C, "Smart Array E200i", &SA5A_access},
	{0x3237103C, "Smart Array E500", &SA5A_access},
	{0x323D103C, "Smart Array P700m", &SA5A_access},
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array P430i", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1925103C, "Smart Array P831", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
				bool *legacy_board);
static int wait_for_device_to_become_ready(struct ctlr_info *h,
					   unsigned char lunaddr[],
					   int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->reset_pending;
}
/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			      u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
{
	device->offload_enabled = 0;
	device->offload_to_be_enabled = 0;
}
static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};
static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
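
/*
 * Example (hypothetical address bytes): the top two bits of byte 3
 * carry the CISS addressing mode, and 0x40 (binary 01) marks the
 * logical-volume mode, so an address beginning
 *	{ 0x00, 0x00, 0x00, 0x40, ... }
 * is treated as a logical device, while a peripheral/physical-mode
 * address with byte 3 == 0x00 is not.
 */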

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)

static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%8phN\n", lunid);
}
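
/*
 * Example (hypothetical address): "%8phN" is the kernel's no-separator
 * hex format for dumping 8 raw bytes, so a volume whose scsi3addr is
 * { 0x00, 0x00, 0x00, 0x40, 0x01, 0x00, 0x00, 0x00 } reads back from
 * this attribute as "0x0000004001000000".
 */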

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static ssize_t sas_address_show(struct device *dev,
	      struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	sas_address = hdev->sas_address;
	spin_unlock_irqrestore(&h->lock, flags);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);

	if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
		return snprintf(buf, 20, "%d\n", offload_enabled);
	else
		return snprintf(buf, 40, "%s\n",
				"Not applicable for a controller");
}
static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
			hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}
static ssize_t host_show_ctlr_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->ctlr);
}

static ssize_t host_show_legacy_board(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
}
static DEVICE_ATTR_RO(raid_level);
static DEVICE_ATTR_RO(lunid);
static DEVICE_ATTR_RO(unique_id);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR_RO(sas_address);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR_RO(path_info);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
static DEVICE_ATTR(ctlr_num, S_IRUGO,
	host_show_ctlr_num, NULL);
static DEVICE_ATTR(legacy_board, S_IRUGO,
	host_show_legacy_board, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	&dev_attr_sas_address,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	&dev_attr_lockup_detected,
	&dev_attr_ctlr_num,
	&dev_attr_legacy_board,
	NULL,
};

#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_DRIVER +\
				 HPSA_MAX_CONCURRENT_PASSTHRUS)
static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 2048,
	.no_write_same = 1,
};
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
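
/*
 * Worked example (hypothetical values): with h->max_commands == 4 the
 * consumer walks entries 0..3 and then flips rq->wraparound from 0 to
 * 1.  On the next lap an entry is only consumed once the controller
 * has rewritten it so that (tag & 1) == 1, which is how a fresh reply
 * is told apart from a stale entry left over from the previous pass,
 * without exchanging explicit producer/consumer indices.
 */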

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * table entry.
 */
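
/*
 * Worked example (hypothetical values): for a performant-mode command
 * whose header reports 5 SG entries, if h->blockFetchTable[5] == 2,
 * set_performant_mode() below computes
 *	c->busaddr |= 1 | (2 << 1);	// low bits become 0b101
 * i.e. bit 0 selects performant mode and bits 1-3 carry the block
 * fetch count the controller uses to size its pull of the command.
 */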
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vectors))
			return;
		c->Header.ReplyQueue = reply_queue;
	}
}
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}
static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = reply_queue;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}
static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should, so we dial down lockup detection during firmware flash and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
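
/*
 * Example with the intervals above: the heartbeat is normally sampled
 * every 30 seconds (30 * HZ jiffies); while a BMIC_FLASH_FIRMWARE
 * write is in flight the window widens to 240 seconds, so a slow
 * flash is not misdiagnosed as a locked-up controller.
 */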
static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);

	reply_queue = h->reply_map[raw_smp_processor_id()];
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}
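
/*
 * RAID_CTLR_LUNID (from hpsa_cmd.h) is the 8-byte all-zero address the
 * controller reserves for itself, so an all-zero scsi3addr here names
 * the HBA itself rather than an attached device.
 */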

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
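
/*
 * Example (hypothetical state): if targets 0 and 1 on the given bus
 * are already taken, the first zero bit in lun_taken is bit 2, so the
 * new physical device is assigned target 2, lun 0, and the function
 * returns 0 (success).
 */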

static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
	case TYPE_ZBC:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_to_be_enabled ? '+' : '-',
			dev->expose_device);
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in byte 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	return 0;
}
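
/*
 * Multi-LUN example (hypothetical addresses): a sibling LUN with
 * scsi3addr { 0x01, 0x23, 0x45, 0x00, 0x02, 0x00, 0x00, 0x00 } matches
 * an existing LUN-0 entry once bytes 4 and 5 are masked to zero, so it
 * inherits that entry's bus and target and takes lun 2 straight from
 * byte 4 of the firmware-supplied address.
 */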

/*
 * Called during a scan operation.
 *
 * Update an entry in h->dev[] array.
 */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/*
	 * ioaccel_handle may have changed for a dual domain disk
	 */
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  If raid map data has changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->offload_to_be_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * ioaccel on until we can update h->dev[entry]->phys_disk[], and we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;

	/*
	 * turn ioaccel off immediately if told to do so.
	 */
	if (!new_entry->offload_to_be_enabled)
		h->dev[entry]->offload_enabled = 0;

	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;

	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	/*
	 * This can happen for dual domain devices.  An active
	 * path change causes the ioaccel handle to change.
	 *
	 * For example, note the handle differences between p0 and p1:
	 * Device WWN, WWN hash, Handle
	 * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
	 *	p1	0x5000C5005FC4DAC9,0x6798C0,0x00040004
	 */
	if (dev1->ioaccel_handle != dev2->ioaccel_handle)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
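
/*
 * Example (hypothetical rescan outcome): a volume that kept its
 * scsi3addr but now reports different vendor/model strings fails
 * device_is_the_same(), so this returns DEVICE_CHANGED and the caller
 * replaces the entry; one that only changed RAID level returns
 * DEVICE_UPDATED and is updated in place.
 */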

static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return;

	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j] == NULL)
				continue;
			if (dev[j]->devtype != TYPE_DISK &&
			    dev[j]->devtype != TYPE_ZBC)
				continue;
			if (is_logical_device(dev[j]))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded.  In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present.  And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case.
		 */
		if (!logical_drive->phys_disk[i]) {
			dev_warn(&h->pdev->dev,
				"%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
				__func__,
				h->scsi_host->host_no, logical_drive->bus,
				logical_drive->target, logical_drive->lun);
			hpsa_turn_off_ioaccel_for_device(logical_drive);
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else {
		if (logical_drive->external)
			logical_drive->queue_depth = EXTERNAL_QD;
		else
			logical_drive->queue_depth = h->nr_cmds;
	}
}
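
/*
 * Queue-depth example (hypothetical volume): a logical drive spread
 * over 4 member disks, each with queue_depth 31, accumulates
 * qdepth = min(h->nr_cmds, 31 + 31 + 31 + 31) = 124 (assuming nr_cmds
 * is larger), enough to keep every member busy on reads; as the
 * in-line comment above notes, this is generous for stripe writes.
 */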

static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i] == NULL)
			continue;
		if (dev[i]->devtype != TYPE_DISK &&
		    dev[i]->devtype != TYPE_ZBC)
			continue;
		if (!is_logical_device(dev[i]))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing,
		 * because we would be changing ioaccel phys_disk[] pointers
		 * on an ioaccel volume processing I/O requests.
		 *
		 * If an ioaccel volume status changed, initially because it was
		 * re-configured and thus underwent a transformation, or
		 * a drive failed, we would have received a state change
		 * request and ioaccel should have been turned off.  When the
		 * transformation completes, we get another state change
		 * request to turn ioaccel back on.  In this case, we need
		 * to update the ioaccel information.
		 *
		 * Thus: If it is not currently enabled, but will be after
		 * the scan completes, make sure the ioaccel pointers
		 * are up to date.
		 */

		if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
			hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}
static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int rc = 0;

	if (!h->scsi_host)
		return 1;

	if (is_logical_device(device)) /* RAID */
		rc = scsi_add_device(h->scsi_host, device->bus,
			device->target, device->lun);
	else /* HBA */
		rc = hpsa_add_sas_device(h->sas_host, device);

	return rc;
}

static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *dev)
{
	int i;
	int count = 0;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
				dev->scsi3addr)) {
			unsigned long flags;

			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				++count;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	return count;
}

static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *device)
{
	int cmds = 0;
	int waits = 0;

	while (1) {
		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
		if (cmds == 0)
			break;
		if (++waits > 20)
			break;
		msleep(1000);
	}

	if (waits > 20)
		dev_warn(&h->pdev->dev,
			"%s: removing device with %d outstanding commands!\n",
			__func__, cmds);
}

static void hpsa_remove_device(struct ctlr_info *h,
			struct hpsa_scsi_dev_t *device)
{
	struct scsi_device *sdev = NULL;

	if (!h->scsi_host)
		return;

	/*
	 * Allow for commands to drain
	 */
	device->removed = 1;
	hpsa_wait_for_outstanding_commands_for_dev(h, device);

	if (is_logical_device(device)) { /* RAID */
		sdev = scsi_device_lookup(h->scsi_host, device->bus,
						device->target, device->lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/*
			 * We don't expect to get here.  Future commands
			 * to this device will get a selection timeout as
			 * if the device were gone.
			 */
			hpsa_show_dev_msg(KERN_WARNING, h, device,
					"didn't find device for removal.");
		}
	} else { /* HBA */

		hpsa_remove_sas_device(device);
	}
}
1906 static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1907 struct hpsa_scsi_dev_t *sd[], int nsds)
1909 /* sd contains scsi3 addresses and devtypes, and inquiry
1910 * data. This function takes what's in sd to be the current
1911 * reality and updates h->dev[] to reflect that reality.
1913 int i, entry, device_change, changes = 0;
1914 struct hpsa_scsi_dev_t *csd;
1915 unsigned long flags;
1916 struct hpsa_scsi_dev_t **added, **removed;
1917 int nadded, nremoved;
1920 * A reset can cause a device's status to change;
1921 * re-schedule the scan to see what happened.
1923 spin_lock_irqsave(&h->reset_lock, flags);
1924 if (h->reset_in_progress) {
1925 h->drv_req_rescan = 1;
1926 spin_unlock_irqrestore(&h->reset_lock, flags);
1929 spin_unlock_irqrestore(&h->reset_lock, flags);
1931 added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
1932 removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);
1934 if (!added || !removed) {
1935 dev_warn(&h->pdev->dev, "out of memory in adjust_hpsa_scsi_table\n");
1940 spin_lock_irqsave(&h->devlock, flags);
1942 /* find any devices in h->dev[] that are not in
1943 * sd[] and remove them from h->dev[], and for any
1944 * devices which have changed, remove the old device
1945 * info and add the new device info.
1946 * If minor device attributes change, just update
1947 * the existing device structure.
1952 while (i < h->ndevices) {
1954 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1955 if (device_change == DEVICE_NOT_FOUND) {
1957 hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1958 continue; /* remove ^^^, hence i not incremented */
1959 } else if (device_change == DEVICE_CHANGED) {
1961 hpsa_scsi_replace_entry(h, i, sd[entry],
1962 added, &nadded, removed, &nremoved);
1963 /* Set it to NULL to prevent it from being freed
1964 * at the bottom of hpsa_update_scsi_devices()
1967 } else if (device_change == DEVICE_UPDATED) {
1968 hpsa_scsi_update_entry(h, i, sd[entry]);
1973 /* Now, make sure every device listed in sd[] is also
1974 * listed in h->dev[], adding them if they aren't found
1977 for (i = 0; i < nsds; i++) {
1978 if (!sd[i]) /* if already added above. */
1981 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1982 * as the SCSI mid-layer does not handle such devices well.
1983 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1984 * at 160Hz, and prevents the system from coming up.
1986 if (sd[i]->volume_offline) {
1987 hpsa_show_volume_status(h, sd[i]);
1988 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1992 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1993 h->ndevices, &entry);
1994 if (device_change == DEVICE_NOT_FOUND) {
1996 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1998 sd[i] = NULL; /* prevent from being freed later. */
1999 } else if (device_change == DEVICE_CHANGED) {
2000 /* should never happen... */
2002 dev_warn(&h->pdev->dev,
2003 "device unexpectedly changed.\n");
2004 /* but if it does happen, we just ignore that device */
2007 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
2010 * Now that h->dev[]->phys_disk[] is coherent, we can enable
2011 * any logical drives that need it enabled.
2013 * The raid map should be current by now.
2015 * We are updating the device list used for I/O requests.
2017 for (i = 0; i < h->ndevices; i++) {
2018 if (h->dev[i] == NULL)
2020 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
2023 spin_unlock_irqrestore(&h->devlock, flags);
2025 /* Monitor devices which are in one of several NOT READY states to be
2026 * brought online later. This must be done without holding h->devlock,
2027 * so don't touch h->dev[]
2029 for (i = 0; i < nsds; i++) {
2030 if (!sd[i]) /* if already added above. */
2032 if (sd[i]->volume_offline)
2033 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
2036 /* Don't notify the SCSI midlayer of any changes the first time
2037 * through (or if there are no changes); scsi_scan_host will do
2038 * it later, the first time through.
2043 /* Notify scsi mid layer of any removed devices */
2044 for (i = 0; i < nremoved; i++) {
2045 if (removed[i] == NULL)
2047 if (removed[i]->expose_device)
2048 hpsa_remove_device(h, removed[i]);
2053 /* Notify scsi mid layer of any added devices */
2054 for (i = 0; i < nadded; i++) {
2057 if (added[i] == NULL)
2059 if (!(added[i]->expose_device))
2061 rc = hpsa_add_device(h, added[i]);
2064 dev_warn(&h->pdev->dev,
2065 "addition failed %d, device not added.", rc);
2066 /* now we have to remove it from h->dev,
2067 * since it didn't get added to scsi mid layer
2069 fixup_botched_add(h, added[i]);
2070 h->drv_req_rescan = 1;
2079 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
2080 * Assumes h->devlock is held.
2082 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2083 int bus, int target, int lun)
2086 struct hpsa_scsi_dev_t *sd;
2088 for (i = 0; i < h->ndevices; i++) {
2090 if (sd->bus == bus && sd->target == target && sd->lun == lun)
2096 static int hpsa_slave_alloc(struct scsi_device *sdev)
2098 struct hpsa_scsi_dev_t *sd = NULL;
2099 unsigned long flags;
2100 struct ctlr_info *h;
2102 h = sdev_to_hba(sdev);
2103 spin_lock_irqsave(&h->devlock, flags);
2104 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2105 struct scsi_target *starget;
2106 struct sas_rphy *rphy;
2108 starget = scsi_target(sdev);
2109 rphy = target_to_rphy(starget);
2110 sd = hpsa_find_device_by_sas_rphy(h, rphy);
2112 sd->target = sdev_id(sdev);
2113 sd->lun = sdev->lun;
2117 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2118 sdev_id(sdev), sdev->lun);
2120 if (sd && sd->expose_device) {
2121 atomic_set(&sd->ioaccel_cmds_out, 0);
2122 sdev->hostdata = sd;
2124 sdev->hostdata = NULL;
2125 spin_unlock_irqrestore(&h->devlock, flags);
2129 /* configure scsi device based on internal per-device structure */
2130 static int hpsa_slave_configure(struct scsi_device *sdev)
2132 struct hpsa_scsi_dev_t *sd;
2135 sd = sdev->hostdata;
2136 sdev->no_uld_attach = !sd || !sd->expose_device;
2140 queue_depth = EXTERNAL_QD;
2142 queue_depth = sd->queue_depth != 0 ?
2143 sd->queue_depth : sdev->host->can_queue;
2145 queue_depth = sdev->host->can_queue;
2147 scsi_change_queue_depth(sdev, queue_depth);
2152 static void hpsa_slave_destroy(struct scsi_device *sdev)
2154 /* nothing to do. */
2157 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2161 if (!h->ioaccel2_cmd_sg_list)
2163 for (i = 0; i < h->nr_cmds; i++) {
2164 kfree(h->ioaccel2_cmd_sg_list[i]);
2165 h->ioaccel2_cmd_sg_list[i] = NULL;
2167 kfree(h->ioaccel2_cmd_sg_list);
2168 h->ioaccel2_cmd_sg_list = NULL;
2171 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2175 if (h->chainsize <= 0)
2178 h->ioaccel2_cmd_sg_list =
2179 kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
2181 if (!h->ioaccel2_cmd_sg_list)
2183 for (i = 0; i < h->nr_cmds; i++) {
2184 h->ioaccel2_cmd_sg_list[i] =
2185 kmalloc_array(h->maxsgentries,
2186 sizeof(*h->ioaccel2_cmd_sg_list[i]),
2188 if (!h->ioaccel2_cmd_sg_list[i])
2194 hpsa_free_ioaccel2_sg_chain_blocks(h);
2198 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2202 if (!h->cmd_sg_list)
2204 for (i = 0; i < h->nr_cmds; i++) {
2205 kfree(h->cmd_sg_list[i]);
2206 h->cmd_sg_list[i] = NULL;
2208 kfree(h->cmd_sg_list);
2209 h->cmd_sg_list = NULL;
2212 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2216 if (h->chainsize <= 0)
2219 h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
2221 if (!h->cmd_sg_list)
2224 for (i = 0; i < h->nr_cmds; i++) {
2225 h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
2226 sizeof(*h->cmd_sg_list[i]),
2228 if (!h->cmd_sg_list[i])
2235 hpsa_free_sg_chain_blocks(h);
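/*
 * DMA-map this command's ioaccel2 SG chain block and store the bus
 * address in the first SG element so the controller can follow the
 * chain.
 */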
2239 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2240 struct io_accel2_cmd *cp, struct CommandList *c)
2242 struct ioaccel2_sg_element *chain_block;
2246 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2247 chain_size = le32_to_cpu(cp->sg[0].length);
2248 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
2250 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2251 /* prevent subsequent unmapping */
2252 cp->sg->address = 0;
2255 cp->sg->address = cpu_to_le64(temp64);
2259 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2260 struct io_accel2_cmd *cp)
2262 struct ioaccel2_sg_element *chain_sg;
2267 temp64 = le64_to_cpu(chain_sg->address);
2268 chain_size = le32_to_cpu(cp->sg[0].length);
2269 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
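/*
 * Same idea for the standard CISS path: the last SG slot of the command
 * becomes a chain descriptor pointing at the DMA-mapped block that holds
 * the remaining SG entries.
 */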
2272 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2273 struct CommandList *c)
2275 struct SGDescriptor *chain_sg, *chain_block;
2279 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2280 chain_block = h->cmd_sg_list[c->cmdindex];
2281 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2282 chain_len = sizeof(*chain_sg) *
2283 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2284 chain_sg->Len = cpu_to_le32(chain_len);
2285 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
2287 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2288 /* prevent subsequent unmapping */
2289 chain_sg->Addr = cpu_to_le64(0);
2292 chain_sg->Addr = cpu_to_le64(temp64);
2296 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2297 struct CommandList *c)
2299 struct SGDescriptor *chain_sg;
2301 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2304 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2305 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
2306 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
2310 /* Decode the various types of errors on ioaccel2 path.
2311 * Return 1 for any error that should generate a RAID path retry.
2312 * Return 0 for errors that don't require a RAID path retry.
2314 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2315 struct CommandList *c,
2316 struct scsi_cmnd *cmd,
2317 struct io_accel2_cmd *c2,
2318 struct hpsa_scsi_dev_t *dev)
2322 u32 ioaccel2_resid = 0;
2324 switch (c2->error_data.serv_response) {
2325 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2326 switch (c2->error_data.status) {
2327 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2331 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2332 cmd->result |= SAM_STAT_CHECK_CONDITION;
2333 if (c2->error_data.data_present !=
2334 IOACCEL2_SENSE_DATA_PRESENT) {
2335 memset(cmd->sense_buffer, 0,
2336 SCSI_SENSE_BUFFERSIZE);
2339 /* copy the sense data */
2340 data_len = c2->error_data.sense_data_len;
2341 if (data_len > SCSI_SENSE_BUFFERSIZE)
2342 data_len = SCSI_SENSE_BUFFERSIZE;
2343 if (data_len > sizeof(c2->error_data.sense_data_buff))
2345 sizeof(c2->error_data.sense_data_buff);
2346 memcpy(cmd->sense_buffer,
2347 c2->error_data.sense_data_buff, data_len);
2350 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2353 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2356 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2359 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2367 case IOACCEL2_SERV_RESPONSE_FAILURE:
2368 switch (c2->error_data.status) {
2369 case IOACCEL2_STATUS_SR_IO_ERROR:
2370 case IOACCEL2_STATUS_SR_IO_ABORTED:
2371 case IOACCEL2_STATUS_SR_OVERRUN:
2374 case IOACCEL2_STATUS_SR_UNDERRUN:
2375 cmd->result = (DID_OK << 16); /* host byte */
2376 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2377 ioaccel2_resid = get_unaligned_le32(
2378 &c2->error_data.resid_cnt[0]);
2379 scsi_set_resid(cmd, ioaccel2_resid);
2381 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2382 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2383 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2385 * Did an HBA disk disappear? We will eventually
2386 * get a state change event from the controller but
2387 * in the meantime, we need to tell the OS that the
2388 * HBA disk is no longer there and stop I/O
2389 * from going down. This allows the potential re-insert
2390 * of the disk to get the same device node.
2392 if (dev->physical_device && dev->expose_device) {
2393 cmd->result = DID_NO_CONNECT << 16;
2395 h->drv_req_rescan = 1;
2396 dev_warn(&h->pdev->dev,
2397 "%s: device is gone!\n", __func__);
2400 * Retry by sending down the RAID path.
2401 * We will get an event from ctlr to
2402 * trigger rescan regardless.
2410 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2412 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2414 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2417 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2424 return retry; /* retry on raid path? */
2427 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2428 struct CommandList *c)
2430 bool do_wake = false;
2433 * Reset c->scsi_cmd here so that the reset handler will know
2434 * this command has completed. Then, check to see if the handler is
2435 * waiting for this command, and, if so, wake it.
2437 c->scsi_cmd = SCSI_CMD_IDLE;
2438 mb(); /* Declare command idle before checking for pending events. */
2439 if (c->reset_pending) {
2440 unsigned long flags;
2441 struct hpsa_scsi_dev_t *dev;
2444 * There appears to be a reset pending; take h->lock and
2445 * reconfirm. If so, decrement the count of outstanding
2446 * commands and wake the reset command if this is the last one.
2448 spin_lock_irqsave(&h->lock, flags);
2449 dev = c->reset_pending; /* Re-fetch under the lock. */
2450 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2452 c->reset_pending = NULL;
2453 spin_unlock_irqrestore(&h->lock, flags);
2457 wake_up_all(&h->event_sync_wait_queue);
2460 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2461 struct CommandList *c)
2463 hpsa_cmd_resolve_events(h, c);
2464 cmd_tagged_free(h, c);
2467 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2468 struct CommandList *c, struct scsi_cmnd *cmd)
2470 hpsa_cmd_resolve_and_free(h, c);
2471 if (cmd && cmd->scsi_done)
2472 cmd->scsi_done(cmd);
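/*
 * Resubmit a command down the RAID path via the resubmit workqueue,
 * queued on the CPU that is completing it.
 */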
2475 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2477 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2478 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
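/*
 * Handle completion of an ioaccel2 command: free it on good status; if
 * the controller disabled offload, turn ioaccel off for the device and
 * retry on the RAID path; otherwise let handle_ioaccel_mode2_error()
 * decide whether a RAID path retry is needed.
 */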
2481 static void process_ioaccel2_completion(struct ctlr_info *h,
2482 struct CommandList *c, struct scsi_cmnd *cmd,
2483 struct hpsa_scsi_dev_t *dev)
2485 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2487 /* check for good status */
2488 if (likely(c2->error_data.serv_response == 0 &&
2489 c2->error_data.status == 0)) {
2491 return hpsa_cmd_free_and_done(h, c, cmd);
2495 * Any RAID offload error results in retry which will use
2496 * the normal I/O path so the controller can handle whatever is wrong.
2499 if (is_logical_device(dev) &&
2500 c2->error_data.serv_response ==
2501 IOACCEL2_SERV_RESPONSE_FAILURE) {
2502 if (c2->error_data.status ==
2503 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2504 hpsa_turn_off_ioaccel_for_device(dev);
2507 return hpsa_retry_cmd(h, c);
2510 if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2511 return hpsa_retry_cmd(h, c);
2513 return hpsa_cmd_free_and_done(h, c, cmd);
2516 /* Returns 0 on success, < 0 otherwise. */
2517 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2518 struct CommandList *cp)
2520 u8 tmf_status = cp->err_info->ScsiStatus;
2522 switch (tmf_status) {
2523 case CISS_TMF_COMPLETE:
2525 * CISS_TMF_COMPLETE never happens, instead,
2526 * ei->CommandStatus == 0 for this case.
2528 case CISS_TMF_SUCCESS:
2530 case CISS_TMF_INVALID_FRAME:
2531 case CISS_TMF_NOT_SUPPORTED:
2532 case CISS_TMF_FAILED:
2533 case CISS_TMF_WRONG_LUN:
2534 case CISS_TMF_OVERLAPPED_TAG:
2537 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2544 static void complete_scsi_command(struct CommandList *cp)
2546 struct scsi_cmnd *cmd;
2547 struct ctlr_info *h;
2548 struct ErrorInfo *ei;
2549 struct hpsa_scsi_dev_t *dev;
2550 struct io_accel2_cmd *c2;
2553 u8 asc; /* additional sense code */
2554 u8 ascq; /* additional sense code qualifier */
2555 unsigned long sense_data_size;
2562 cmd->result = DID_NO_CONNECT << 16;
2563 return hpsa_cmd_free_and_done(h, cp, cmd);
2566 dev = cmd->device->hostdata;
2568 cmd->result = DID_NO_CONNECT << 16;
2569 return hpsa_cmd_free_and_done(h, cp, cmd);
2571 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2573 scsi_dma_unmap(cmd); /* undo the DMA mappings */
2574 if ((cp->cmd_type == CMD_SCSI) &&
2575 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2576 hpsa_unmap_sg_chain_block(h, cp);
2578 if ((cp->cmd_type == CMD_IOACCEL2) &&
2579 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2580 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2582 cmd->result = (DID_OK << 16); /* host byte */
2583 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2585 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2586 if (dev->physical_device && dev->expose_device &&
2588 cmd->result = DID_NO_CONNECT << 16;
2589 return hpsa_cmd_free_and_done(h, cp, cmd);
2591 if (likely(cp->phys_disk != NULL))
2592 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2596 * We check for lockup status here as it may be set for
2597 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2598 * fail_all_outstanding_cmds()
2600 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2601 /* DID_NO_CONNECT will prevent a retry */
2602 cmd->result = DID_NO_CONNECT << 16;
2603 return hpsa_cmd_free_and_done(h, cp, cmd);
2606 if ((unlikely(hpsa_is_pending_event(cp))))
2607 if (cp->reset_pending)
2608 return hpsa_cmd_free_and_done(h, cp, cmd);
2610 if (cp->cmd_type == CMD_IOACCEL2)
2611 return process_ioaccel2_completion(h, cp, cmd, dev);
2613 scsi_set_resid(cmd, ei->ResidualCnt);
2614 if (ei->CommandStatus == 0)
2615 return hpsa_cmd_free_and_done(h, cp, cmd);
2617 /* For I/O accelerator commands, copy over some fields to the normal
2618 * CISS header used below for error handling.
2620 if (cp->cmd_type == CMD_IOACCEL1) {
2621 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2622 cp->Header.SGList = scsi_sg_count(cmd);
2623 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2624 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2625 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2626 cp->Header.tag = c->tag;
2627 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2628 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2630 /* Any RAID offload error results in retry which will use
2631 * the normal I/O path so the controller can handle whatever's wrong.
2634 if (is_logical_device(dev)) {
2635 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2636 dev->offload_enabled = 0;
2637 return hpsa_retry_cmd(h, cp);
2641 /* an error has occurred */
2642 switch (ei->CommandStatus) {
2644 case CMD_TARGET_STATUS:
2645 cmd->result |= ei->ScsiStatus;
2646 /* copy the sense data */
2647 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2648 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2650 sense_data_size = sizeof(ei->SenseInfo);
2651 if (ei->SenseLen < sense_data_size)
2652 sense_data_size = ei->SenseLen;
2653 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2655 decode_sense_data(ei->SenseInfo, sense_data_size,
2656 &sense_key, &asc, &ascq);
2657 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2658 if (sense_key == ABORTED_COMMAND) {
2659 cmd->result |= DID_SOFT_ERROR << 16;
2664 /* Problem was not a check condition.
2665 * Pass it up to the upper layers...
2667 if (ei->ScsiStatus) {
2668 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2669 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2670 "Returning result: 0x%x\n",
2672 sense_key, asc, ascq,
2674 } else { /* scsi status is zero??? How??? */
2675 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2676 "Returning no connection.\n", cp);
2678 /* Ordinarily, this case should never happen,
2679 * but there is a bug in some released firmware
2680 * revisions that allows it to happen if, for
2681 * example, a 4100 backplane loses power and
2682 * the tape drive is in it. We assume that
2683 * it's a fatal error of some kind because we
2684 * can't show that it wasn't. We will make it
2685 * look like selection timeout since that is
2686 * the most common reason for this to occur,
2687 * and it's severe enough.
2690 cmd->result = DID_NO_CONNECT << 16;
2694 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2696 case CMD_DATA_OVERRUN:
2697 dev_warn(&h->pdev->dev,
2698 "CDB %16phN data overrun\n", cp->Request.CDB);
2701 /* print_bytes(cp, sizeof(*cp), 1, 0);
2703 /* We get CMD_INVALID if you address a non-existent device
2704 * instead of a selection timeout (no response). You will
2705 * see this if you yank out a drive, then try to access it.
2706 * This is kind of a shame because it means that any other
2707 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2708 * missing target. */
2709 cmd->result = DID_NO_CONNECT << 16;
2712 case CMD_PROTOCOL_ERR:
2713 cmd->result = DID_ERROR << 16;
2714 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2717 case CMD_HARDWARE_ERR:
2718 cmd->result = DID_ERROR << 16;
2719 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2722 case CMD_CONNECTION_LOST:
2723 cmd->result = DID_ERROR << 16;
2724 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2728 cmd->result = DID_ABORT << 16;
2730 case CMD_ABORT_FAILED:
2731 cmd->result = DID_ERROR << 16;
2732 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2735 case CMD_UNSOLICITED_ABORT:
2736 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2737 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2741 cmd->result = DID_TIME_OUT << 16;
2742 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2745 case CMD_UNABORTABLE:
2746 cmd->result = DID_ERROR << 16;
2747 dev_warn(&h->pdev->dev, "Command unabortable\n");
2749 case CMD_TMF_STATUS:
2750 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2751 cmd->result = DID_ERROR << 16;
2753 case CMD_IOACCEL_DISABLED:
2754 /* This only handles the direct pass-through case since RAID
2755 * offload is handled above. Just attempt a retry.
2757 cmd->result = DID_SOFT_ERROR << 16;
2758 dev_warn(&h->pdev->dev,
2759 "cp %p had HP SSD Smart Path error\n", cp);
2762 cmd->result = DID_ERROR << 16;
2763 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2764 cp, ei->CommandStatus);
2767 return hpsa_cmd_free_and_done(h, cp, cmd);
2770 static void hpsa_pci_unmap(struct pci_dev *pdev,
2771 struct CommandList *c, int sg_used, int data_direction)
2775 for (i = 0; i < sg_used; i++)
2776 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2777 le32_to_cpu(c->SG[i].Len),
2781 static int hpsa_map_one(struct pci_dev *pdev,
2782 struct CommandList *cp,
2789 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2790 cp->Header.SGList = 0;
2791 cp->Header.SGTotal = cpu_to_le16(0);
2795 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2796 if (dma_mapping_error(&pdev->dev, addr64)) {
2797 /* Prevent subsequent unmap of something never mapped */
2798 cp->Header.SGList = 0;
2799 cp->Header.SGTotal = cpu_to_le16(0);
2802 cp->SG[0].Addr = cpu_to_le64(addr64);
2803 cp->SG[0].Len = cpu_to_le32(buflen);
2804 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2805 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2806 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2810 #define NO_TIMEOUT ((unsigned long) -1)
2811 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
2812 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2813 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2815 DECLARE_COMPLETION_ONSTACK(wait);
2818 __enqueue_cmd_and_start_io(h, c, reply_queue);
2819 if (timeout_msecs == NO_TIMEOUT) {
2820 /* TODO: get rid of this no-timeout thing */
2821 wait_for_completion_io(&wait);
2824 if (!wait_for_completion_io_timeout(&wait,
2825 msecs_to_jiffies(timeout_msecs))) {
2826 dev_warn(&h->pdev->dev, "Command timed out.\n");
2832 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2833 int reply_queue, unsigned long timeout_msecs)
2835 if (unlikely(lockup_detected(h))) {
2836 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2839 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
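/* Read the current CPU's copy of the per-cpu controller lockup flag. */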
2842 static u32 lockup_detected(struct ctlr_info *h)
2845 u32 rc, *lockup_detected;
2848 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2849 rc = *lockup_detected;
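/*
 * Issue a driver-internal command and retry, with a growing backoff
 * (capped at one second), while the target reports unit attention or
 * busy, up to MAX_DRIVER_CMD_RETRIES attempts.
 */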
2854 #define MAX_DRIVER_CMD_RETRIES 25
2855 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2856 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2858 int backoff_time = 10, retry_count = 0;
2862 memset(c->err_info, 0, sizeof(*c->err_info));
2863 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2868 if (retry_count > 3) {
2869 msleep(backoff_time);
2870 if (backoff_time < 1000)
2873 } while ((check_for_unit_attention(h, c) ||
2874 check_for_busy(h, c)) &&
2875 retry_count <= MAX_DRIVER_CMD_RETRIES);
2876 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2877 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2882 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2883 struct CommandList *c)
2885 const u8 *cdb = c->Request.CDB;
2886 const u8 *lun = c->Header.LUN.LunAddrBytes;
2888 dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2892 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2893 struct CommandList *cp)
2895 const struct ErrorInfo *ei = cp->err_info;
2896 struct device *d = &cp->h->pdev->dev;
2897 u8 sense_key, asc, ascq;
2900 switch (ei->CommandStatus) {
2901 case CMD_TARGET_STATUS:
2902 if (ei->SenseLen > sizeof(ei->SenseInfo))
2903 sense_len = sizeof(ei->SenseInfo);
2905 sense_len = ei->SenseLen;
2906 decode_sense_data(ei->SenseInfo, sense_len,
2907 &sense_key, &asc, &ascq);
2908 hpsa_print_cmd(h, "SCSI status", cp);
2909 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2910 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2911 sense_key, asc, ascq);
2913 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2914 if (ei->ScsiStatus == 0)
2915 dev_warn(d, "SCSI status is abnormally zero. (probably indicates selection timeout reported incorrectly due to a known firmware bug, circa July, 2001.)\n");
2920 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2922 case CMD_DATA_OVERRUN:
2923 hpsa_print_cmd(h, "overrun condition", cp);
2926 /* controller unfortunately reports SCSI passthru's
2927 * to non-existent targets as invalid commands.
2929 hpsa_print_cmd(h, "invalid command", cp);
2930 dev_warn(d, "probably means device no longer present\n");
2933 case CMD_PROTOCOL_ERR:
2934 hpsa_print_cmd(h, "protocol error", cp);
2936 case CMD_HARDWARE_ERR:
2937 hpsa_print_cmd(h, "hardware error", cp);
2939 case CMD_CONNECTION_LOST:
2940 hpsa_print_cmd(h, "connection lost", cp);
2943 hpsa_print_cmd(h, "aborted", cp);
2945 case CMD_ABORT_FAILED:
2946 hpsa_print_cmd(h, "abort failed", cp);
2948 case CMD_UNSOLICITED_ABORT:
2949 hpsa_print_cmd(h, "unsolicited abort", cp);
2952 hpsa_print_cmd(h, "timed out", cp);
2954 case CMD_UNABORTABLE:
2955 hpsa_print_cmd(h, "unabortable", cp);
2957 case CMD_CTLR_LOCKUP:
2958 hpsa_print_cmd(h, "controller lockup detected", cp);
2961 hpsa_print_cmd(h, "unknown status", cp);
2962 dev_warn(d, "Unknown command status %x\n",
2967 static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
2968 u8 page, u8 *buf, size_t bufsize)
2971 struct CommandList *c;
2972 struct ErrorInfo *ei;
2975 if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
2976 page, scsi3addr, TYPE_CMD)) {
2980 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2981 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2985 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2986 hpsa_scsi_interpret_error(h, c);
2994 static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
3001 buf = kzalloc(1024, GFP_KERNEL);
3005 rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
3011 sa = get_unaligned_be64(buf+12);
3018 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
3019 u16 page, unsigned char *buf,
3020 unsigned char bufsize)
3023 struct CommandList *c;
3024 struct ErrorInfo *ei;
3028 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
3029 page, scsi3addr, TYPE_CMD)) {
3033 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3034 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3038 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3039 hpsa_scsi_interpret_error(h, c);
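/*
 * Send a reset message (no data phase) to the addressed device and
 * check the returned ErrorInfo for success.
 */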
3047 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
3048 u8 reset_type, int reply_queue)
3051 struct CommandList *c;
3052 struct ErrorInfo *ei;
3057 /* fill_cmd can't fail here, no data buffer to map. */
3058 (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
3059 scsi3addr, TYPE_MSG);
3060 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
3062 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
3065 /* no unmap needed here because no data xfer. */
3068 if (ei->CommandStatus != 0) {
3069 hpsa_scsi_interpret_error(h, c);
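/*
 * Decide whether an outstanding command is addressed to the given
 * device, keyed off c->cmd_type; the reset path uses this to find
 * commands it must wait for.
 */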
3077 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
3078 struct hpsa_scsi_dev_t *dev,
3079 unsigned char *scsi3addr)
3083 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
3084 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
3086 if (hpsa_is_cmd_idle(c))
3089 switch (c->cmd_type) {
3091 case CMD_IOCTL_PEND:
3092 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
3093 sizeof(c->Header.LUN.LunAddrBytes));
3098 if (c->phys_disk == dev) {
3099 /* HBA mode match */
3102 /* Possible RAID mode -- check each phys dev. */
3103 /* FIXME: Do we need to take out a lock here? If
3104 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
3106 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3107 /* FIXME: an alternate test might be
3109 * match = dev->phys_disk[i]->ioaccel_handle
3110 * == c2->scsi_nexus; */
3111 match = dev->phys_disk[i] == c->phys_disk;
3117 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3118 match = dev->phys_disk[i]->ioaccel_handle ==
3119 le32_to_cpu(ac->it_nexus);
3123 case 0: /* The command is in the middle of being initialized. */
3128 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
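/*
 * Carry out a device reset: serialize on h->reset_mutex, mark each
 * matching outstanding command as reset-pending, send the reset, then
 * wait for those commands to drain (or for a controller lockup) before
 * checking that the device is ready again.
 */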
3136 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3137 unsigned char *scsi3addr, u8 reset_type, int reply_queue)
3142 /* We can really only handle one reset at a time */
3143 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3144 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3148 BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
3150 for (i = 0; i < h->nr_cmds; i++) {
3151 struct CommandList *c = h->cmd_pool + i;
3152 int refcount = atomic_inc_return(&c->refcount);
3154 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
3155 unsigned long flags;
3158 * Mark the target command as having a reset pending,
3159 * then take h->lock so that the command cannot complete
3160 * while we're considering it. If the command is not
3161 * idle then count it; otherwise revoke the event.
3163 c->reset_pending = dev;
3164 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
3165 if (!hpsa_is_cmd_idle(c))
3166 atomic_inc(&dev->reset_cmds_out);
3168 c->reset_pending = NULL;
3169 spin_unlock_irqrestore(&h->lock, flags);
3175 rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
3177 wait_event(h->event_sync_wait_queue,
3178 atomic_read(&dev->reset_cmds_out) == 0 ||
3179 lockup_detected(h));
3181 if (unlikely(lockup_detected(h))) {
3182 dev_warn(&h->pdev->dev,
3183 "Controller lockup detected during reset wait\n");
3188 atomic_set(&dev->reset_cmds_out, 0);
3190 rc = wait_for_device_to_become_ready(h, scsi3addr, 0);
3192 mutex_unlock(&h->reset_mutex);
3196 static void hpsa_get_raid_level(struct ctlr_info *h,
3197 unsigned char *scsi3addr, unsigned char *raid_level)
3202 *raid_level = RAID_UNKNOWN;
3203 buf = kzalloc(64, GFP_KERNEL);
3207 if (!hpsa_vpd_page_supported(h, scsi3addr,
3208 HPSA_VPD_LV_DEVICE_GEOMETRY))
3211 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3212 HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3215 *raid_level = buf[8];
3216 if (*raid_level > RAID_UNKNOWN)
3217 *raid_level = RAID_UNKNOWN;
3223 #define HPSA_MAP_DEBUG
3224 #ifdef HPSA_MAP_DEBUG
3225 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3226 struct raid_map_data *map_buff)
3228 struct raid_map_disk_data *dd = &map_buff->data[0];
3230 u16 map_cnt, row_cnt, disks_per_row;
3235 /* Show details only if debugging has been activated. */
3236 if (h->raid_offload_debug < 2)
3239 dev_info(&h->pdev->dev, "structure_size = %u\n",
3240 le32_to_cpu(map_buff->structure_size));
3241 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3242 le32_to_cpu(map_buff->volume_blk_size));
3243 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3244 le64_to_cpu(map_buff->volume_blk_cnt));
3245 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3246 map_buff->phys_blk_shift);
3247 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3248 map_buff->parity_rotation_shift);
3249 dev_info(&h->pdev->dev, "strip_size = %u\n",
3250 le16_to_cpu(map_buff->strip_size));
3251 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3252 le64_to_cpu(map_buff->disk_starting_blk));
3253 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3254 le64_to_cpu(map_buff->disk_blk_cnt));
3255 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3256 le16_to_cpu(map_buff->data_disks_per_row));
3257 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3258 le16_to_cpu(map_buff->metadata_disks_per_row));
3259 dev_info(&h->pdev->dev, "row_cnt = %u\n",
3260 le16_to_cpu(map_buff->row_cnt));
3261 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3262 le16_to_cpu(map_buff->layout_map_count));
3263 dev_info(&h->pdev->dev, "flags = 0x%x\n",
3264 le16_to_cpu(map_buff->flags));
3265 dev_info(&h->pdev->dev, "encryption = %s\n",
3266 le16_to_cpu(map_buff->flags) &
3267 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
3268 dev_info(&h->pdev->dev, "dekindex = %u\n",
3269 le16_to_cpu(map_buff->dekindex));
3270 map_cnt = le16_to_cpu(map_buff->layout_map_count);
3271 for (map = 0; map < map_cnt; map++) {
3272 dev_info(&h->pdev->dev, "Map%u:\n", map);
3273 row_cnt = le16_to_cpu(map_buff->row_cnt);
3274 for (row = 0; row < row_cnt; row++) {
3275 dev_info(&h->pdev->dev, " Row%u:\n", row);
3277 le16_to_cpu(map_buff->data_disks_per_row);
3278 for (col = 0; col < disks_per_row; col++, dd++)
3279 dev_info(&h->pdev->dev,
3280 " D%02u: h=0x%04x xor=%u,%u\n",
3281 col, dd->ioaccel_handle,
3282 dd->xor_mult[0], dd->xor_mult[1]);
3284 le16_to_cpu(map_buff->metadata_disks_per_row);
3285 for (col = 0; col < disks_per_row; col++, dd++)
3286 dev_info(&h->pdev->dev,
3287 " M%02u: h=0x%04x xor=%u,%u\n",
3288 col, dd->ioaccel_handle,
3289 dd->xor_mult[0], dd->xor_mult[1]);
3294 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3295 __attribute__((unused)) int rc,
3296 __attribute__((unused)) struct raid_map_data *map_buff)
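/*
 * Fetch the RAID map for a logical volume from the controller; a valid
 * map must be loaded before ioaccel offload can be enabled for that
 * volume.
 */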
3301 static int hpsa_get_raid_map(struct ctlr_info *h,
3302 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3305 struct CommandList *c;
3306 struct ErrorInfo *ei;
3310 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3311 sizeof(this_device->raid_map), 0,
3312 scsi3addr, TYPE_CMD)) {
3313 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3317 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3318 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3322 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3323 hpsa_scsi_interpret_error(h, c);
3329 /* @todo in the future, dynamically allocate RAID map memory */
3330 if (le32_to_cpu(this_device->raid_map.structure_size) >
3331 sizeof(this_device->raid_map)) {
3332 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3335 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3342 static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3343 unsigned char scsi3addr[], u16 bmic_device_index,
3344 struct bmic_sense_subsystem_info *buf, size_t bufsize)
3347 struct CommandList *c;
3348 struct ErrorInfo *ei;
3352 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3353 0, RAID_CTLR_LUNID, TYPE_CMD);
3357 c->Request.CDB[2] = bmic_device_index & 0xff;
3358 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3360 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3361 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3365 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3366 hpsa_scsi_interpret_error(h, c);
3374 static int hpsa_bmic_id_controller(struct ctlr_info *h,
3375 struct bmic_identify_controller *buf, size_t bufsize)
3378 struct CommandList *c;
3379 struct ErrorInfo *ei;
3383 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3384 0, RAID_CTLR_LUNID, TYPE_CMD);
3388 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3389 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3393 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3394 hpsa_scsi_interpret_error(h, c);
3402 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3403 unsigned char scsi3addr[], u16 bmic_device_index,
3404 struct bmic_identify_physical_device *buf, size_t bufsize)
3407 struct CommandList *c;
3408 struct ErrorInfo *ei;
3411 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3412 0, RAID_CTLR_LUNID, TYPE_CMD);
3416 c->Request.CDB[2] = bmic_device_index & 0xff;
3417 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3419 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3422 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3423 hpsa_scsi_interpret_error(h, c);
3433 * get enclosure information
3434 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
3435 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
3436 * Uses id_physical_device to determine the box_index.
3438 static void hpsa_get_enclosure_info(struct ctlr_info *h,
3439 unsigned char *scsi3addr,
3440 struct ReportExtendedLUNdata *rlep, int rle_index,
3441 struct hpsa_scsi_dev_t *encl_dev)
3444 struct CommandList *c = NULL;
3445 struct ErrorInfo *ei = NULL;
3446 struct bmic_sense_storage_box_params *bssbp = NULL;
3447 struct bmic_identify_physical_device *id_phys = NULL;
3448 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3449 u16 bmic_device_index = 0;
3452 hpsa_get_enclosure_logical_identifier(h, scsi3addr);
3454 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3456 if (encl_dev->target == -1 || encl_dev->lun == -1) {
3461 if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
3466 bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
3470 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3474 rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
3475 id_phys, sizeof(*id_phys));
3477 dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
3478 __func__, encl_dev->external, bmic_device_index);
3484 rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
3485 sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
3490 if (id_phys->phys_connector[1] == 'E')
3491 c->Request.CDB[5] = id_phys->box_index;
3493 c->Request.CDB[5] = 0;
3495 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3501 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3506 encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
3507 memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
3508 bssbp->phys_connector, sizeof(bssbp->phys_connector));
3519 hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
3520 "Error, could not get enclosure information");
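/*
 * Look up a device's SAS address (WWID) by issuing report physical
 * LUNs and scanning the result for a matching scsi3addr.
 */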
3523 static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3524 unsigned char *scsi3addr)
3526 struct ReportExtendedLUNdata *physdev;
3531 physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3535 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3536 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3540 nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3542 for (i = 0; i < nphysicals; i++)
3543 if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3544 sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3553 static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3554 struct hpsa_scsi_dev_t *dev)
3559 if (is_hba_lunid(scsi3addr)) {
3560 struct bmic_sense_subsystem_info *ssi;
3562 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3566 rc = hpsa_bmic_sense_subsystem_information(h,
3567 scsi3addr, 0, ssi, sizeof(*ssi));
3569 sa = get_unaligned_be64(ssi->primary_world_wide_id);
3570 h->sas_address = sa;
3575 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3577 dev->sas_address = sa;
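/*
 * Scan the report-physical list for an external controller; if one is
 * present, disable report-lun-data caching and switch to discovery
 * polling.
 */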
3580 static void hpsa_ext_ctrl_present(struct ctlr_info *h,
3581 struct ReportExtendedLUNdata *physdev)
3586 if (h->discovery_polling)
3589 nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
3591 for (i = 0; i < nphysicals; i++) {
3592 if (physdev->LUN[i].device_type ==
3593 BMIC_DEVICE_TYPE_CONTROLLER
3594 && !is_hba_lunid(physdev->LUN[i].lunid)) {
3595 dev_info(&h->pdev->dev,
3596 "External controller present, activate discovery polling and disable rld caching\n");
3597 hpsa_disable_rld_caching(h);
3598 h->discovery_polling = 1;
3604 /* Check whether the device supports a given VPD (inquiry) page */
3605 static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3606 unsigned char scsi3addr[], u8 page)
3611 unsigned char *buf, bufsize;
3613 buf = kzalloc(256, GFP_KERNEL);
3617 /* Get the size of the page list first */
3618 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3619 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3620 buf, HPSA_VPD_HEADER_SZ);
3622 goto exit_unsupported;
3624 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3625 bufsize = pages + HPSA_VPD_HEADER_SZ;
3629 /* Get the whole VPD page list */
3630 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3631 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3634 goto exit_unsupported;
3637 for (i = 1; i <= pages; i++)
3638 if (buf[3 + i] == page)
3639 goto exit_supported;
3649 * Called during a scan operation.
3650 * Sets ioaccel status on the new device list, not the existing device list.
3652 * The device list used during I/O will be updated later in
3653 * adjust_hpsa_scsi_table.
3655 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3656 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3662 this_device->offload_config = 0;
3663 this_device->offload_enabled = 0;
3664 this_device->offload_to_be_enabled = 0;
3666 buf = kzalloc(64, GFP_KERNEL);
3669 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3671 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3672 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3676 #define IOACCEL_STATUS_BYTE 4
3677 #define OFFLOAD_CONFIGURED_BIT 0x01
3678 #define OFFLOAD_ENABLED_BIT 0x02
3679 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3680 this_device->offload_config =
3681 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3682 if (this_device->offload_config) {
3683 bool offload_enabled =
3684 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3686 * Check to see if offload can be enabled.
3688 if (offload_enabled) {
3689 rc = hpsa_get_raid_map(h, scsi3addr, this_device);
3690 if (rc) /* could not load raid_map */
3692 this_device->offload_to_be_enabled = 1;
3701 /* Get the device id from inquiry page 0x83 */
3702 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3703 unsigned char *device_id, int index, int buflen)
3708 /* Does controller have VPD for device id? */
3709 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
3710 return 1; /* not supported */
3712 buf = kzalloc(64, GFP_KERNEL);
3716 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3717 HPSA_VPD_LV_DEVICE_ID, buf, 64);
3721 memcpy(device_id, &buf[8], buflen);
3726 return rc; /*0 - got id, otherwise, didn't */
3729 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3730 void *buf, int bufsize,
3731 int extended_response)
3734 struct CommandList *c;
3735 unsigned char scsi3addr[8];
3736 struct ErrorInfo *ei;
3740 /* address the controller */
3741 memset(scsi3addr, 0, sizeof(scsi3addr));
3742 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3743 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3747 if (extended_response)
3748 c->Request.CDB[1] = extended_response;
3749 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3750 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3754 if (ei->CommandStatus != 0 &&
3755 ei->CommandStatus != CMD_DATA_UNDERRUN) {
3756 hpsa_scsi_interpret_error(h, c);
3759 struct ReportLUNdata *rld = buf;
3761 if (rld->extended_response_flag != extended_response) {
3762 if (!h->legacy_board) {
3763 dev_err(&h->pdev->dev,
3764 "report luns requested format %u, got %u\n",
3766 rld->extended_response_flag);
3777 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3778 struct ReportExtendedLUNdata *buf, int bufsize)
3781 struct ReportLUNdata *lbuf;
3783 rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3784 HPSA_REPORT_PHYS_EXTENDED);
3785 if (!rc || rc != -EOPNOTSUPP)
3788 /* REPORT PHYS EXTENDED is not supported */
3789 lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
3793 rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
3798 /* Copy ReportLUNdata header */
3799 memcpy(buf, lbuf, 8);
3800 nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
3801 for (i = 0; i < nphys; i++)
3802 memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
3808 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3809 struct ReportLUNdata *buf, int bufsize)
3811 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3814 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3815 int bus, int target, int lun)
3818 device->target = target;
3822 /* Use VPD inquiry to get details of volume status */
3823 static int hpsa_get_volume_status(struct ctlr_info *h,
3824 unsigned char scsi3addr[])
3831 buf = kzalloc(64, GFP_KERNEL);
3833 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3835 /* Does controller have VPD for logical volume status? */
3836 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3839 /* Get the size of the VPD return buffer */
3840 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3841 buf, HPSA_VPD_HEADER_SZ);
3846 /* Now get the whole VPD buffer */
3847 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3848 buf, size + HPSA_VPD_HEADER_SZ);
3851 status = buf[4]; /* status byte */
3857 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3860 /* Determine offline status of a volume.
3863 * 0xff (offline for unknown reasons)
3864 * # (integer code indicating one of several NOT READY states
3865 * describing why a volume is to be kept offline)
3867 static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3868 unsigned char scsi3addr[])
3870 struct CommandList *c;
3871 unsigned char *sense;
3872 u8 sense_key, asc, ascq;
3877 #define ASC_LUN_NOT_READY 0x04
3878 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3879 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3883 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3884 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3888 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3890 sense = c->err_info->SenseInfo;
3891 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3892 sense_len = sizeof(c->err_info->SenseInfo);
3894 sense_len = c->err_info->SenseLen;
3895 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3896 cmd_status = c->err_info->CommandStatus;
3897 scsi_status = c->err_info->ScsiStatus;
3900 /* Determine the reason for not ready state */
3901 ldstat = hpsa_get_volume_status(h, scsi3addr);
3903 /* Keep volume offline in certain cases: */
3905 case HPSA_LV_FAILED:
3906 case HPSA_LV_UNDERGOING_ERASE:
3907 case HPSA_LV_NOT_AVAILABLE:
3908 case HPSA_LV_UNDERGOING_RPI:
3909 case HPSA_LV_PENDING_RPI:
3910 case HPSA_LV_ENCRYPTED_NO_KEY:
3911 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3912 case HPSA_LV_UNDERGOING_ENCRYPTION:
3913 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3914 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3916 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3917 /* If VPD status page isn't available,
3918 * use ASC/ASCQ to determine state
3920 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3921 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
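/*
 * Fill in a device structure from INQUIRY data: vendor, model, device
 * id and, for logical volumes, RAID level, ioaccel status and volume
 * status. Optionally reports whether this looks like an OBDR
 * (One-Button-Disaster-Recovery) device.
 */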
3930 static int hpsa_update_device_info(struct ctlr_info *h,
3931 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3932 unsigned char *is_OBDR_device)
3935 #define OBDR_SIG_OFFSET 43
3936 #define OBDR_TAPE_SIG "$DR-10"
3937 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3938 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3940 unsigned char *inq_buff;
3941 unsigned char *obdr_sig;
3944 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3950 /* Do an inquiry to the device to see what it is. */
3951 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3952 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3953 dev_err(&h->pdev->dev,
3954 "%s: inquiry failed, device will be skipped.\n",
3956 rc = HPSA_INQUIRY_FAILED;
3960 scsi_sanitize_inquiry_string(&inq_buff[8], 8);
3961 scsi_sanitize_inquiry_string(&inq_buff[16], 16);
3963 this_device->devtype = (inq_buff[0] & 0x1f);
3964 memcpy(this_device->scsi3addr, scsi3addr, 8);
3965 memcpy(this_device->vendor, &inq_buff[8],
3966 sizeof(this_device->vendor));
3967 memcpy(this_device->model, &inq_buff[16],
3968 sizeof(this_device->model));
3969 this_device->rev = inq_buff[2];
3970 memset(this_device->device_id, 0,
3971 sizeof(this_device->device_id));
3972 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3973 sizeof(this_device->device_id)) < 0)
3974 dev_err(&h->pdev->dev,
3975 "hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
3977 h->scsi_host->host_no,
3978 this_device->target, this_device->lun,
3979 scsi_device_type(this_device->devtype),
3980 this_device->model);
3982 if ((this_device->devtype == TYPE_DISK ||
3983 this_device->devtype == TYPE_ZBC) &&
3984 is_logical_dev_addr_mode(scsi3addr)) {
3985 unsigned char volume_offline;
3987 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3988 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3989 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3990 volume_offline = hpsa_volume_offline(h, scsi3addr);
3991 if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
3994 * Legacy boards might not support volume status
3996 dev_info(&h->pdev->dev,
3997 "C0:T%d:L%d Volume status not available, assuming online.\n",
3998 this_device->target, this_device->lun);
4001 this_device->volume_offline = volume_offline;
4002 if (volume_offline == HPSA_LV_FAILED) {
4003 rc = HPSA_LV_FAILED;
4004 dev_err(&h->pdev->dev,
4005 "%s: LV failed, device will be skipped.\n",
4010 this_device->raid_level = RAID_UNKNOWN;
4011 this_device->offload_config = 0;
4012 hpsa_turn_off_ioaccel_for_device(this_device);
4013 this_device->hba_ioaccel_enabled = 0;
4014 this_device->volume_offline = 0;
4015 this_device->queue_depth = h->nr_cmds;
4018 if (this_device->external)
4019 this_device->queue_depth = EXTERNAL_QD;
4021 if (is_OBDR_device) {
4022 /* See if this is a One-Button-Disaster-Recovery device
4023 * by looking for "$DR-10" at offset 43 in inquiry data.
4025 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
4026 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
4027 strncmp(obdr_sig, OBDR_TAPE_SIG,
4028 OBDR_SIG_LEN) == 0);
4039 * Helper function to assign bus, target, lun mapping of devices.
4040 * Logical drive target and lun are assigned at this time, but
4041 * physical device lun and target assignment are deferred (assigned
4042 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
4044 static void figure_bus_target_lun(struct ctlr_info *h,
4045 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
4047 u32 lunid = get_unaligned_le32(lunaddrbytes);
4049 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
4050 /* physical device, target and lun filled in later */
4051 if (is_hba_lunid(lunaddrbytes)) {
4052 int bus = HPSA_HBA_BUS;
4055 bus = HPSA_LEGACY_HBA_BUS;
4056 hpsa_set_bus_target_lun(device,
4057 bus, 0, lunid & 0x3fff);
4059 /* defer target, lun assignment for physical devices */
4060 hpsa_set_bus_target_lun(device,
4061 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
4064 /* It's a logical device */
4065 if (device->external) {
4066 hpsa_set_bus_target_lun(device,
4067 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
4071 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
4075 static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
4076 int i, int nphysicals, int nlocal_logicals)
4078 /* In report logicals, local logicals are listed first,
4079 * then any externals.
4081 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4083 if (i == raid_ctlr_position)
4086 if (i < logicals_start)
4089 /* i is in logicals range, but still within local logicals */
4090 if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
4093 return 1; /* it's an external lun */
4097 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
4098 * logdev. The number of luns in physdev and logdev are returned in
4099 * *nphysicals and *nlogicals, respectively.
4100 * Returns 0 on success, -1 otherwise.
4102 static int hpsa_gather_lun_info(struct ctlr_info *h,
4103 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
4104 struct ReportLUNdata *logdev, u32 *nlogicals)
4106 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
4107 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
4110 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
4111 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
4112 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
4113 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
4114 *nphysicals = HPSA_MAX_PHYS_LUN;
4116 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
4117 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
4120 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
4121 /* Reject Logicals in excess of our max capability. */
4122 if (*nlogicals > HPSA_MAX_LUN) {
4123 dev_warn(&h->pdev->dev,
4124 "maximum logical LUNs (%d) exceeded. %d LUNs ignored.\n",
4125 HPSA_MAX_LUN, *nlogicals - HPSA_MAX_LUN);
4127 *nlogicals = HPSA_MAX_LUN;
4129 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
4130 dev_warn(&h->pdev->dev,
4131 "maximum logical + physical LUNs (%d) exceeded. %d LUNs ignored.\n",
4132 HPSA_MAX_PHYS_LUN, *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
4134 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
4139 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
4140 int i, int nphysicals, int nlogicals,
4141 struct ReportExtendedLUNdata *physdev_list,
4142 struct ReportLUNdata *logdev_list)
4144 /* Helper function: figure out where the LUN ID info is coming from,
4145 * given index i, the lists of physical and logical devices, and where in
4146 * the list the raid controller is supposed to appear (first or last)
4149 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4150 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
4152 if (i == raid_ctlr_position)
4153 return RAID_CTLR_LUNID;
4155 if (i < logicals_start)
4156 return &physdev_list->LUN[i -
4157 (raid_ctlr_position == 0)].lunid[0];
4159 if (i < last_device)
4160 return &logdev_list->LUN[i - nphysicals -
4161 (raid_ctlr_position == 0)][0];
4166 /* get physical drive ioaccel handle and queue depth */
4167 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4168 struct hpsa_scsi_dev_t *dev,
4169 struct ReportExtendedLUNdata *rlep, int rle_index,
4170 struct bmic_identify_physical_device *id_phys)
4173 struct ext_report_lun_entry *rle;
4175 rle = &rlep->LUN[rle_index];
4177 dev->ioaccel_handle = rle->ioaccel_handle;
4178 if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
4179 dev->hba_ioaccel_enabled = 1;
4180 memset(id_phys, 0, sizeof(*id_phys));
4181 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
4182 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
4185 /* Reserve space for FW operations */
4186 #define DRIVE_CMDS_RESERVED_FOR_FW 2
4187 #define DRIVE_QUEUE_DEPTH 7
4189 le16_to_cpu(id_phys->current_queue_depth_limit) -
4190 DRIVE_CMDS_RESERVED_FOR_FW;
4192 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
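/*
 * Copy multipath details (active path, path map, box, bay and physical
 * connector) from the BMIC identify-physical-device data into the
 * device structure.
 */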
4195 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
4196 struct ReportExtendedLUNdata *rlep, int rle_index,
4197 struct bmic_identify_physical_device *id_phys)
4199 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
4201 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
4202 this_device->hba_ioaccel_enabled = 1;
4204 memcpy(&this_device->active_path_index,
4205 &id_phys->active_path_number,
4206 sizeof(this_device->active_path_index));
4207 memcpy(&this_device->path_map,
4208 &id_phys->redundant_path_present_map,
4209 sizeof(this_device->path_map));
4210 memcpy(&this_device->box,
4211 &id_phys->alternate_paths_phys_box_on_port,
4212 sizeof(this_device->box));
4213 memcpy(&this_device->phys_connector,
4214 &id_phys->alternate_paths_phys_connector,
4215 sizeof(this_device->phys_connector));
4216 memcpy(&this_device->bay,
4217 &id_phys->phys_bay_in_box,
4218 sizeof(this_device->bay));
4221 /* get number of local logical disks. */
4222 static int hpsa_set_local_logical_count(struct ctlr_info *h,
4223 struct bmic_identify_controller *id_ctlr,
4229 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
4233 memset(id_ctlr, 0, sizeof(*id_ctlr));
4234 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4236 if (id_ctlr->configured_logical_drive_count < 255)
4237 *nlocals = id_ctlr->configured_logical_drive_count;
4239 *nlocals = le16_to_cpu(
4240 id_ctlr->extended_logical_unit_count);
4246 static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
4248 struct bmic_identify_physical_device *id_phys;
4249 bool is_spare = false;
4252 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4256 rc = hpsa_bmic_id_physical_device(h,
4258 GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
4259 id_phys, sizeof(*id_phys));
4261 is_spare = (id_phys->more_flags >> 6) & 0x01;
4267 #define RPL_DEV_FLAG_NON_DISK 0x1
4268 #define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
4269 #define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
4271 #define BMIC_DEVICE_TYPE_ENCLOSURE 6
4273 static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
4274 struct ext_report_lun_entry *rle)
4279 if (!MASKED_DEVICE(lunaddrbytes))
4282 device_flags = rle->device_flags;
4283 device_type = rle->device_type;
4285 if (device_flags & RPL_DEV_FLAG_NON_DISK) {
4286 if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
4291 if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
4294 if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
4298  * Spares may be spun down, and we do not want to
4299  * do an Inquiry to a RAID-set spare drive, as
4300  * that would have it spun up; that is a
4301  * performance hit because I/O to the RAID device
4302  * stops while the spin-up occurs, which can take over half a minute.
4305 if (hpsa_is_disk_spare(h, lunaddrbytes))
4311 static void hpsa_update_scsi_devices(struct ctlr_info *h)
4313 /* The idea here is that we could get notified that
4314  * some devices have changed, so we issue report physical LUNs
4315  * and report logical LUNs commands, and adjust our list of
4316  * devices accordingly.
4318 * The scsi3addr's of devices won't change so long as the
4319 * adapter is not reset. That means we can rescan and
4320 * tell which devices we already know about, vs. new
4321 * devices, vs. disappearing devices.
4323 struct ReportExtendedLUNdata *physdev_list = NULL;
4324 struct ReportLUNdata *logdev_list = NULL;
4325 struct bmic_identify_physical_device *id_phys = NULL;
4326 struct bmic_identify_controller *id_ctlr = NULL;
4329 u32 nlocal_logicals = 0;
4330 u32 ndev_allocated = 0;
4331 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4333 int i, n_ext_target_devs, ndevs_to_allocate;
4334 int raid_ctlr_position;
4335 bool physical_device;
4336 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4338 currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
4339 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4340 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4341 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4342 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4343 id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4345 if (!currentsd || !physdev_list || !logdev_list ||
4346 !tmpdevice || !id_phys || !id_ctlr) {
4347 dev_err(&h->pdev->dev, "out of memory\n");
4350 memset(lunzerobits, 0, sizeof(lunzerobits));
4352 h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
4354 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4355 logdev_list, &nlogicals)) {
4356 h->drv_req_rescan = 1;
4360 /* Set number of local logicals (non-PTRAID) */
4361 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4362 dev_warn(&h->pdev->dev,
4363 "%s: Can't determine number of local logical devices.\n",
4367 /* We might see up to the maximum number of logical and physical disks
4368  * plus external target devices, and a device for the local RAID controller. */
4371 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4373 hpsa_ext_ctrl_present(h, physdev_list);
4375 /* Allocate the per device structures */
4376 for (i = 0; i < ndevs_to_allocate; i++) {
4377 if (i >= HPSA_MAX_DEVICES) {
4378 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4379 " %d devices ignored.\n", HPSA_MAX_DEVICES,
4380 ndevs_to_allocate - HPSA_MAX_DEVICES);
4384 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4385 if (!currentsd[i]) {
4386 h->drv_req_rescan = 1;
4392 if (is_scsi_rev_5(h))
4393 raid_ctlr_position = 0;
4395 raid_ctlr_position = nphysicals + nlogicals;
4397 /* adjust our table of devices */
4398 n_ext_target_devs = 0;
4399 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4400 u8 *lunaddrbytes, is_OBDR = 0;
4402 int phys_dev_index = i - (raid_ctlr_position == 0);
4403 bool skip_device = false;
4405 memset(tmpdevice, 0, sizeof(*tmpdevice));
4407 physical_device = i < nphysicals + (raid_ctlr_position == 0);
4409 /* Figure out where the LUN ID info is coming from */
4410 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4411 i, nphysicals, nlogicals, physdev_list, logdev_list);
4413 /* Determine if this is a lun from an external target array */
4414 tmpdevice->external =
4415 figure_external_status(h, raid_ctlr_position, i,
4416 nphysicals, nlocal_logicals);
4419 * Skip over some devices such as a spare.
4421 if (!tmpdevice->external && physical_device) {
4422 skip_device = hpsa_skip_device(h, lunaddrbytes,
4423 &physdev_list->LUN[phys_dev_index]);
4428 /* Get device type, vendor, model, device id, raid_map */
4429 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4431 if (rc == -ENOMEM) {
4432 dev_warn(&h->pdev->dev,
4433 "Out of memory, rescan deferred.\n");
4434 h->drv_req_rescan = 1;
4438 h->drv_req_rescan = 1;
4442 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4443 this_device = currentsd[ncurrent];
4445 *this_device = *tmpdevice;
4446 this_device->physical_device = physical_device;
4449 * Expose all devices except for physical devices that are masked.
4452 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4453 this_device->expose_device = 0;
4455 this_device->expose_device = 1;
4459 * Get the SAS address for physical devices that are exposed.
4461 if (this_device->physical_device && this_device->expose_device)
4462 hpsa_get_sas_address(h, lunaddrbytes, this_device);
4464 switch (this_device->devtype) {
4466 /* We don't *really* support actual CD-ROM devices,
4467  * just the "One Button Disaster Recovery" tape drive,
4468  * which temporarily pretends to be a CD-ROM drive.
4469  * So we check that the device is really an OBDR tape
4470  * device by checking for "$DR-10" in bytes 43-48 of the inquiry data. */
4478 if (this_device->physical_device) {
4479 /* The disk is in HBA mode. */
4480 /* Never use RAID mapper in HBA mode. */
4481 this_device->offload_enabled = 0;
4482 hpsa_get_ioaccel_drive_info(h, this_device,
4483 physdev_list, phys_dev_index, id_phys);
4484 hpsa_get_path_info(this_device,
4485 physdev_list, phys_dev_index, id_phys);
4490 case TYPE_MEDIUM_CHANGER:
4493 case TYPE_ENCLOSURE:
4494 if (!this_device->external)
4495 hpsa_get_enclosure_info(h, lunaddrbytes,
4496 physdev_list, phys_dev_index,
4501 /* Only present the Smart Array HBA as a RAID controller.
4502  * If it's a RAID controller other than the HBA itself
4503  * (an external RAID controller, MSA500 or similar), don't present it. */
4506 if (!is_hba_lunid(lunaddrbytes))
4513 if (ncurrent >= HPSA_MAX_DEVICES)
4517 if (h->sas_host == NULL) {
4520 rc = hpsa_add_sas_host(h);
4522 dev_warn(&h->pdev->dev,
4523 "Could not add sas host %d\n", rc);
4528 adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4531 for (i = 0; i < ndev_allocated; i++)
4532 kfree(currentsd[i]);
4534 kfree(physdev_list);
4540 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4541 struct scatterlist *sg)
4543 u64 addr64 = (u64) sg_dma_address(sg);
4544 unsigned int len = sg_dma_len(sg);
4546 desc->Addr = cpu_to_le64(addr64);
4547 desc->Len = cpu_to_le32(len);
4552 * hpsa_scatter_gather takes a struct scsi_cmnd (cmd) and does the PCI
4553 * DMA mapping, then fills in the scatter-gather entries of the hpsa command, cp.
4556 static int hpsa_scatter_gather(struct ctlr_info *h,
4557 struct CommandList *cp,
4558 struct scsi_cmnd *cmd)
4560 struct scatterlist *sg;
4561 int use_sg, i, sg_limit, chained, last_sg;
4562 struct SGDescriptor *curr_sg;
4564 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4566 use_sg = scsi_dma_map(cmd);
4571 goto sglist_finished;
4574 * If the number of entries is greater than the max for a single list,
4575 * then we have a chained list; we will set up all but one entry in the
4576 * first list (the last entry is saved for link information);
4577 * otherwise, we don't have a chained list and we'll set up each of
4578 * the entries in the one list.
4581 chained = use_sg > h->max_cmd_sg_entries;
4582 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4583 last_sg = scsi_sg_count(cmd) - 1;
4584 scsi_for_each_sg(cmd, sg, sg_limit, i) {
4585 hpsa_set_sg_descriptor(curr_sg, sg);
4591 * Continue with the chained list. Set curr_sg to the chained
4592 * list. Modify the limit to the total count less the entries
4593 * we've already set up. Resume the scan at the list entry
4594 * where the previous loop left off.
4596 curr_sg = h->cmd_sg_list[cp->cmdindex];
4597 sg_limit = use_sg - sg_limit;
4598 for_each_sg(sg, sg, sg_limit, i) {
4599 hpsa_set_sg_descriptor(curr_sg, sg);
4604 /* Back the pointer up to the last entry and mark it as "last". */
4605 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4607 if (use_sg + chained > h->maxSG)
4608 h->maxSG = use_sg + chained;
4611 cp->Header.SGList = h->max_cmd_sg_entries;
4612 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4613 if (hpsa_map_sg_chain_block(h, cp)) {
4614 scsi_dma_unmap(cmd);
4622 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
4623 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
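/*
 * Chaining example (limits assumed for illustration): if
 * h->max_cmd_sg_entries were 33 and scsi_dma_map() returned 40
 * entries, the first 32 descriptors land in the embedded list, the
 * chain block holds the remaining 8, and SGTotal = 40 + 1 counts the
 * chain descriptor itself.
 */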
4627 static inline void warn_zero_length_transfer(struct ctlr_info *h,
4628 u8 *cdb, int cdb_len,
4631 dev_warn(&h->pdev->dev,
4632 "%s: Blocking zero-length request: CDB:%*phN\n",
4633 func, cdb_len, cdb);
4636 #define IO_ACCEL_INELIGIBLE 1
4637 /* zero-length transfers trigger hardware errors. */
4638 static bool is_zero_length_transfer(u8 *cdb)
4642 /* Block zero-length transfer sizes on certain commands. */
4646 case VERIFY: /* 0x2F */
4647 case WRITE_VERIFY: /* 0x2E */
4648 block_cnt = get_unaligned_be16(&cdb[7]);
4652 case VERIFY_12: /* 0xAF */
4653 case WRITE_VERIFY_12: /* 0xAE */
4654 block_cnt = get_unaligned_be32(&cdb[6]);
4658 case VERIFY_16: /* 0x8F */
4659 block_cnt = get_unaligned_be32(&cdb[10]);
4665 return block_cnt == 0;
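/*
 * For example, a hypothetical WRITE AND VERIFY(10) CDB whose
 * transfer-length bytes 7-8 are both zero decodes to block_cnt == 0
 * and is reported as a zero-length transfer, which the ioaccel paths
 * must reject before submission.
 */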
4668 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4674 /* Perform some CDB fixups if needed, using 10-byte reads/writes only */
4681 if (*cdb_len == 6) {
4682 block = (((cdb[1] & 0x1F) << 16) |
4689 BUG_ON(*cdb_len != 12);
4690 block = get_unaligned_be32(&cdb[2]);
4691 block_cnt = get_unaligned_be32(&cdb[6]);
4693 if (block_cnt > 0xffff)
4694 return IO_ACCEL_INELIGIBLE;
4696 cdb[0] = is_write ? WRITE_10 : READ_10;
4698 cdb[2] = (u8) (block >> 24);
4699 cdb[3] = (u8) (block >> 16);
4700 cdb[4] = (u8) (block >> 8);
4701 cdb[5] = (u8) (block);
4703 cdb[7] = (u8) (block_cnt >> 8);
4704 cdb[8] = (u8) (block_cnt);
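/*
 * Worked example (hypothetical CDB): a READ(6) of
 * 08 00 12 34 10 00 decodes to block = 0x001234 and block_cnt = 0x10,
 * and is rewritten above as a READ(10) carrying the same LBA and
 * count, since the ioaccel path handles only 10-byte read/write CDBs.
 */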
4712 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4713 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4714 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4716 struct scsi_cmnd *cmd = c->scsi_cmd;
4717 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4719 unsigned int total_len = 0;
4720 struct scatterlist *sg;
4723 struct SGDescriptor *curr_sg;
4724 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4726 /* TODO: implement chaining support */
4727 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4728 atomic_dec(&phys_disk->ioaccel_cmds_out);
4729 return IO_ACCEL_INELIGIBLE;
4732 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4734 if (is_zero_length_transfer(cdb)) {
4735 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4736 atomic_dec(&phys_disk->ioaccel_cmds_out);
4737 return IO_ACCEL_INELIGIBLE;
4740 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4741 atomic_dec(&phys_disk->ioaccel_cmds_out);
4742 return IO_ACCEL_INELIGIBLE;
4745 c->cmd_type = CMD_IOACCEL1;
4747 /* Adjust the DMA address to point to the accelerated command buffer */
4748 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4749 (c->cmdindex * sizeof(*cp));
4750 BUG_ON(c->busaddr & 0x0000007F);
4752 use_sg = scsi_dma_map(cmd);
4754 atomic_dec(&phys_disk->ioaccel_cmds_out);
4760 scsi_for_each_sg(cmd, sg, use_sg, i) {
4761 addr64 = (u64) sg_dma_address(sg);
4762 len = sg_dma_len(sg);
4764 curr_sg->Addr = cpu_to_le64(addr64);
4765 curr_sg->Len = cpu_to_le32(len);
4766 curr_sg->Ext = cpu_to_le32(0);
4769 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4771 switch (cmd->sc_data_direction) {
4773 control |= IOACCEL1_CONTROL_DATA_OUT;
4775 case DMA_FROM_DEVICE:
4776 control |= IOACCEL1_CONTROL_DATA_IN;
4779 control |= IOACCEL1_CONTROL_NODATAXFER;
4782 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4783 cmd->sc_data_direction);
4788 control |= IOACCEL1_CONTROL_NODATAXFER;
4791 c->Header.SGList = use_sg;
4792 /* Fill out the command structure to submit */
4793 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4794 cp->transfer_len = cpu_to_le32(total_len);
4795 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4796 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4797 cp->control = cpu_to_le32(control);
4798 memcpy(cp->CDB, cdb, cdb_len);
4799 memcpy(cp->CISS_LUN, scsi3addr, 8);
4800 /* Tag was already set at init time. */
4801 enqueue_cmd_and_start_io(h, c);
4806 * Queue a command directly to a device behind the controller using the
4807 * I/O accelerator path.
4809 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4810 struct CommandList *c)
4812 struct scsi_cmnd *cmd = c->scsi_cmd;
4813 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4820 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4821 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4825 * Set encryption parameters for the ioaccel2 request
4827 static void set_encrypt_ioaccel2(struct ctlr_info *h,
4828 struct CommandList *c, struct io_accel2_cmd *cp)
4830 struct scsi_cmnd *cmd = c->scsi_cmd;
4831 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4832 struct raid_map_data *map = &dev->raid_map;
4835 /* Are we doing encryption on this device? */
4836 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4838 /* Set the data encryption key index. */
4839 cp->dekindex = map->dekindex;
4841 /* Set the encryption enable flag, encoded into direction field. */
4842 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4844 /* Set encryption tweak values based on the logical block address.
4845  * If the block size is 512, the tweak value is the LBA.
4846  * For other block sizes, the tweak is (LBA * block size) / 512. */
4848 switch (cmd->cmnd[0]) {
4849 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4852 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4853 (cmd->cmnd[2] << 8) |
4858 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4861 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4865 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4868 dev_err(&h->pdev->dev,
4869 "ERROR: %s: size (0x%x) not supported for encryption\n",
4870 __func__, cmd->cmnd[0]);
4875 if (le32_to_cpu(map->volume_blk_size) != 512)
4876 first_block = first_block *
4877 le32_to_cpu(map->volume_blk_size)/512;
4879 cp->tweak_lower = cpu_to_le32(first_block);
4880 cp->tweak_upper = cpu_to_le32(first_block >> 32);
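/*
 * Tweak example (block size assumed for illustration): for a volume
 * with 4096-byte blocks, an I/O starting at logical block 100 gets a
 * tweak of 100 * 4096 / 512 = 800, so tweak_lower = 800 and
 * tweak_upper = 0; with 512-byte blocks the tweak is the LBA itself.
 */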
4883 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4884 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4885 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4887 struct scsi_cmnd *cmd = c->scsi_cmd;
4888 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4889 struct ioaccel2_sg_element *curr_sg;
4891 struct scatterlist *sg;
4899 if (!cmd->device->hostdata)
4902 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4904 if (is_zero_length_transfer(cdb)) {
4905 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4906 atomic_dec(&phys_disk->ioaccel_cmds_out);
4907 return IO_ACCEL_INELIGIBLE;
4910 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4911 atomic_dec(&phys_disk->ioaccel_cmds_out);
4912 return IO_ACCEL_INELIGIBLE;
4915 c->cmd_type = CMD_IOACCEL2;
4916 /* Adjust the DMA address to point to the accelerated command buffer */
4917 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4918 (c->cmdindex * sizeof(*cp));
4919 BUG_ON(c->busaddr & 0x0000007F);
4921 memset(cp, 0, sizeof(*cp));
4922 cp->IU_type = IOACCEL2_IU_TYPE;
4924 use_sg = scsi_dma_map(cmd);
4926 atomic_dec(&phys_disk->ioaccel_cmds_out);
4932 if (use_sg > h->ioaccel_maxsg) {
4933 addr64 = le64_to_cpu(
4934 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4935 curr_sg->address = cpu_to_le64(addr64);
4936 curr_sg->length = 0;
4937 curr_sg->reserved[0] = 0;
4938 curr_sg->reserved[1] = 0;
4939 curr_sg->reserved[2] = 0;
4940 curr_sg->chain_indicator = IOACCEL2_CHAIN;
4942 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4944 scsi_for_each_sg(cmd, sg, use_sg, i) {
4945 addr64 = (u64) sg_dma_address(sg);
4946 len = sg_dma_len(sg);
4948 curr_sg->address = cpu_to_le64(addr64);
4949 curr_sg->length = cpu_to_le32(len);
4950 curr_sg->reserved[0] = 0;
4951 curr_sg->reserved[1] = 0;
4952 curr_sg->reserved[2] = 0;
4953 curr_sg->chain_indicator = 0;
4958 * Set the last s/g element bit
4960 (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
4962 switch (cmd->sc_data_direction) {
4964 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4965 cp->direction |= IOACCEL2_DIR_DATA_OUT;
4967 case DMA_FROM_DEVICE:
4968 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4969 cp->direction |= IOACCEL2_DIR_DATA_IN;
4972 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4973 cp->direction |= IOACCEL2_DIR_NO_DATA;
4976 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4977 cmd->sc_data_direction);
4982 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4983 cp->direction |= IOACCEL2_DIR_NO_DATA;
4986 /* Set encryption parameters, if necessary */
4987 set_encrypt_ioaccel2(h, c, cp);
4989 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4990 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4991 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4993 cp->data_len = cpu_to_le32(total_len);
4994 cp->err_ptr = cpu_to_le64(c->busaddr +
4995 offsetof(struct io_accel2_cmd, error_data));
4996 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4998 /* fill in sg elements */
4999 if (use_sg > h->ioaccel_maxsg) {
5001 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
5002 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
5003 atomic_dec(&phys_disk->ioaccel_cmds_out);
5004 scsi_dma_unmap(cmd);
5008 cp->sg_count = (u8) use_sg;
5010 enqueue_cmd_and_start_io(h, c);
5015 * Queue a command to the correct I/O accelerator path.
5017 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
5018 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
5019 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
5021 if (!c->scsi_cmd->device)
5024 if (!c->scsi_cmd->device->hostdata)
5027 /* Try to honor the device's queue depth */
5028 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
5029 phys_disk->queue_depth) {
5030 atomic_dec(&phys_disk->ioaccel_cmds_out);
5031 return IO_ACCEL_INELIGIBLE;
5033 if (h->transMethod & CFGTBL_Trans_io_accel1)
5034 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
5035 cdb, cdb_len, scsi3addr,
5038 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
5039 cdb, cdb_len, scsi3addr,
5043 static void raid_map_helper(struct raid_map_data *map,
5044 int offload_to_mirror, u32 *map_index, u32 *current_group)
5046 if (offload_to_mirror == 0) {
5047 /* use physical disk in the first mirrored group. */
5048 *map_index %= le16_to_cpu(map->data_disks_per_row);
5052 /* determine mirror group that *map_index indicates */
5053 *current_group = *map_index /
5054 le16_to_cpu(map->data_disks_per_row);
5055 if (offload_to_mirror == *current_group)
5057 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
5058 /* select map index from next group */
5059 *map_index += le16_to_cpu(map->data_disks_per_row);
5062 /* select map index from first group */
5063 *map_index %= le16_to_cpu(map->data_disks_per_row);
5066 } while (offload_to_mirror != *current_group);
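/*
 * Example walk-through (map values assumed): with data_disks_per_row =
 * 4, layout_map_count = 3 (a three-way mirror) and *map_index = 5,
 * requesting mirror group 2 computes *current_group = 5 / 4 = 1 on the
 * first pass, advances *map_index to 9 (same column, next group), and
 * exits once *current_group reaches 2.
 */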
5070 * Attempt to perform offload RAID mapping for a logical volume I/O.
5072 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5073 struct CommandList *c)
5075 struct scsi_cmnd *cmd = c->scsi_cmd;
5076 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5077 struct raid_map_data *map = &dev->raid_map;
5078 struct raid_map_disk_data *dd = &map->data[0];
5081 u64 first_block, last_block;
5084 u64 first_row, last_row;
5085 u32 first_row_offset, last_row_offset;
5086 u32 first_column, last_column;
5087 u64 r0_first_row, r0_last_row;
5088 u32 r5or6_blocks_per_row;
5089 u64 r5or6_first_row, r5or6_last_row;
5090 u32 r5or6_first_row_offset, r5or6_last_row_offset;
5091 u32 r5or6_first_column, r5or6_last_column;
5092 u32 total_disks_per_row;
5094 u32 first_group, last_group, current_group;
5102 #if BITS_PER_LONG == 32
5105 int offload_to_mirror;
5110 /* check for valid opcode, get LBA and block count */
5111 switch (cmd->cmnd[0]) {
5115 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
5116 (cmd->cmnd[2] << 8) |
5118 block_cnt = cmd->cmnd[4];
5126 (((u64) cmd->cmnd[2]) << 24) |
5127 (((u64) cmd->cmnd[3]) << 16) |
5128 (((u64) cmd->cmnd[4]) << 8) |
5131 (((u32) cmd->cmnd[7]) << 8) |
5138 (((u64) cmd->cmnd[2]) << 24) |
5139 (((u64) cmd->cmnd[3]) << 16) |
5140 (((u64) cmd->cmnd[4]) << 8) |
5143 (((u32) cmd->cmnd[6]) << 24) |
5144 (((u32) cmd->cmnd[7]) << 16) |
5145 (((u32) cmd->cmnd[8]) << 8) |
5152 (((u64) cmd->cmnd[2]) << 56) |
5153 (((u64) cmd->cmnd[3]) << 48) |
5154 (((u64) cmd->cmnd[4]) << 40) |
5155 (((u64) cmd->cmnd[5]) << 32) |
5156 (((u64) cmd->cmnd[6]) << 24) |
5157 (((u64) cmd->cmnd[7]) << 16) |
5158 (((u64) cmd->cmnd[8]) << 8) |
5161 (((u32) cmd->cmnd[10]) << 24) |
5162 (((u32) cmd->cmnd[11]) << 16) |
5163 (((u32) cmd->cmnd[12]) << 8) |
5167 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
5169 last_block = first_block + block_cnt - 1;
5171 /* check for write to non-RAID-0 */
5172 if (is_write && dev->raid_level != 0)
5173 return IO_ACCEL_INELIGIBLE;
5175 /* check for invalid block or wraparound */
5176 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
5177 last_block < first_block)
5178 return IO_ACCEL_INELIGIBLE;
5180 /* calculate stripe information for the request */
5181 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
5182 le16_to_cpu(map->strip_size);
5183 strip_size = le16_to_cpu(map->strip_size);
5184 #if BITS_PER_LONG == 32
5185 tmpdiv = first_block;
5186 (void) do_div(tmpdiv, blocks_per_row);
5188 tmpdiv = last_block;
5189 (void) do_div(tmpdiv, blocks_per_row);
5191 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5192 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5193 tmpdiv = first_row_offset;
5194 (void) do_div(tmpdiv, strip_size);
5195 first_column = tmpdiv;
5196 tmpdiv = last_row_offset;
5197 (void) do_div(tmpdiv, strip_size);
5198 last_column = tmpdiv;
5200 first_row = first_block / blocks_per_row;
5201 last_row = last_block / blocks_per_row;
5202 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5203 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5204 first_column = first_row_offset / strip_size;
5205 last_column = last_row_offset / strip_size;
5208 /* if this isn't a single row/column then give to the controller */
5209 if ((first_row != last_row) || (first_column != last_column))
5210 return IO_ACCEL_INELIGIBLE;
5212 /* proceeding with driver mapping */
5213 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
5214 le16_to_cpu(map->metadata_disks_per_row);
5215 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5216 le16_to_cpu(map->row_cnt);
5217 map_index = (map_row * total_disks_per_row) + first_column;
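/*
 * Stripe-math example (layout assumed): with strip_size = 128 and
 * data_disks_per_row = 3, blocks_per_row = 384. An I/O at
 * first_block = 1000 lands in first_row = 1000 / 384 = 2,
 * first_row_offset = 1000 - 2 * 384 = 232, and
 * first_column = 232 / 128 = 1; the single row/column check above has
 * already sent any request that spans rows or columns to the
 * controller.
 */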
5219 switch (dev->raid_level) {
5221 break; /* nothing special to do */
5223 /* Handles load balance across RAID 1 members.
5224 * (2-drive R1 and R10 with even # of drives.)
5225 * Appropriate for SSDs, not optimal for HDDs.
5226 * Ensure we have the correct raid_map.
5228 if (le16_to_cpu(map->layout_map_count) != 2) {
5229 hpsa_turn_off_ioaccel_for_device(dev);
5230 return IO_ACCEL_INELIGIBLE;
5232 if (dev->offload_to_mirror)
5233 map_index += le16_to_cpu(map->data_disks_per_row);
5234 dev->offload_to_mirror = !dev->offload_to_mirror;
5237 /* Handles N-way mirrors (R1-ADM)
5238 * and R10 with # of drives divisible by 3.
5239 * Ensure we have the correct raid_map.
5241 if (le16_to_cpu(map->layout_map_count) != 3) {
5242 hpsa_turn_off_ioaccel_for_device(dev);
5243 return IO_ACCEL_INELIGIBLE;
5246 offload_to_mirror = dev->offload_to_mirror;
5247 raid_map_helper(map, offload_to_mirror,
5248 &map_index, &current_group);
5249 /* set mirror group to use next time */
5251 (offload_to_mirror >=
5252 le16_to_cpu(map->layout_map_count) - 1)
5253 ? 0 : offload_to_mirror + 1;
5254 dev->offload_to_mirror = offload_to_mirror;
5255 /* Avoid direct use of dev->offload_to_mirror within this
5256  * function since multiple threads might simultaneously
5257  * increment it beyond the range of map->layout_map_count - 1.
5262 if (le16_to_cpu(map->layout_map_count) <= 1)
5265 /* Verify first and last block are in same RAID group */
5266 r5or6_blocks_per_row =
5267 le16_to_cpu(map->strip_size) *
5268 le16_to_cpu(map->data_disks_per_row);
5269 if (r5or6_blocks_per_row == 0) {
5270 hpsa_turn_off_ioaccel_for_device(dev);
5271 return IO_ACCEL_INELIGIBLE;
5273 stripesize = r5or6_blocks_per_row *
5274 le16_to_cpu(map->layout_map_count);
5275 #if BITS_PER_LONG == 32
5276 tmpdiv = first_block;
5277 first_group = do_div(tmpdiv, stripesize);
5278 tmpdiv = first_group;
5279 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5280 first_group = tmpdiv;
5281 tmpdiv = last_block;
5282 last_group = do_div(tmpdiv, stripesize);
5283 tmpdiv = last_group;
5284 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5285 last_group = tmpdiv;
5287 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5288 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
5290 if (first_group != last_group)
5291 return IO_ACCEL_INELIGIBLE;
5293 /* Verify request is in a single row of RAID 5/6 */
5294 #if BITS_PER_LONG == 32
5295 tmpdiv = first_block;
5296 (void) do_div(tmpdiv, stripesize);
5297 first_row = r5or6_first_row = r0_first_row = tmpdiv;
5298 tmpdiv = last_block;
5299 (void) do_div(tmpdiv, stripesize);
5300 r5or6_last_row = r0_last_row = tmpdiv;
5302 first_row = r5or6_first_row = r0_first_row =
5303 first_block / stripesize;
5304 r5or6_last_row = r0_last_row = last_block / stripesize;
5306 if (r5or6_first_row != r5or6_last_row)
5307 return IO_ACCEL_INELIGIBLE;
5310 /* Verify request is in a single column */
5311 #if BITS_PER_LONG == 32
5312 tmpdiv = first_block;
5313 first_row_offset = do_div(tmpdiv, stripesize);
5314 tmpdiv = first_row_offset;
5315 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5316 r5or6_first_row_offset = first_row_offset;
5317 tmpdiv = last_block;
5318 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5319 tmpdiv = r5or6_last_row_offset;
5320 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5321 tmpdiv = r5or6_first_row_offset;
5322 (void) do_div(tmpdiv, map->strip_size);
5323 first_column = r5or6_first_column = tmpdiv;
5324 tmpdiv = r5or6_last_row_offset;
5325 (void) do_div(tmpdiv, map->strip_size);
5326 r5or6_last_column = tmpdiv;
5328 first_row_offset = r5or6_first_row_offset =
5329 (u32)((first_block % stripesize) %
5330 r5or6_blocks_per_row);
5332 r5or6_last_row_offset =
5333 (u32)((last_block % stripesize) %
5334 r5or6_blocks_per_row);
5336 first_column = r5or6_first_column =
5337 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
5339 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
5341 if (r5or6_first_column != r5or6_last_column)
5342 return IO_ACCEL_INELIGIBLE;
5344 /* Request is eligible */
5345 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5346 le16_to_cpu(map->row_cnt);
5348 map_index = (first_group *
5349 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
5350 (map_row * total_disks_per_row) + first_column;
5353 return IO_ACCEL_INELIGIBLE;
5356 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5357 return IO_ACCEL_INELIGIBLE;
5359 c->phys_disk = dev->phys_disk[map_index];
5361 return IO_ACCEL_INELIGIBLE;
5363 disk_handle = dd[map_index].ioaccel_handle;
5364 disk_block = le64_to_cpu(map->disk_starting_blk) +
5365 first_row * le16_to_cpu(map->strip_size) +
5366 (first_row_offset - first_column *
5367 le16_to_cpu(map->strip_size));
5368 disk_block_cnt = block_cnt;
5370 /* handle differing logical/physical block sizes */
5371 if (map->phys_blk_shift) {
5372 disk_block <<= map->phys_blk_shift;
5373 disk_block_cnt <<= map->phys_blk_shift;
5375 BUG_ON(disk_block_cnt > 0xffff);
5377 /* build the new CDB for the physical disk I/O */
5378 if (disk_block > 0xffffffff) {
5379 cdb[0] = is_write ? WRITE_16 : READ_16;
5381 cdb[2] = (u8) (disk_block >> 56);
5382 cdb[3] = (u8) (disk_block >> 48);
5383 cdb[4] = (u8) (disk_block >> 40);
5384 cdb[5] = (u8) (disk_block >> 32);
5385 cdb[6] = (u8) (disk_block >> 24);
5386 cdb[7] = (u8) (disk_block >> 16);
5387 cdb[8] = (u8) (disk_block >> 8);
5388 cdb[9] = (u8) (disk_block);
5389 cdb[10] = (u8) (disk_block_cnt >> 24);
5390 cdb[11] = (u8) (disk_block_cnt >> 16);
5391 cdb[12] = (u8) (disk_block_cnt >> 8);
5392 cdb[13] = (u8) (disk_block_cnt);
5397 cdb[0] = is_write ? WRITE_10 : READ_10;
5399 cdb[2] = (u8) (disk_block >> 24);
5400 cdb[3] = (u8) (disk_block >> 16);
5401 cdb[4] = (u8) (disk_block >> 8);
5402 cdb[5] = (u8) (disk_block);
5404 cdb[7] = (u8) (disk_block_cnt >> 8);
5405 cdb[8] = (u8) (disk_block_cnt);
5409 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5411 dev->phys_disk[map_index]);
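/*
 * Block-size scaling example (sizes assumed): if the volume presents
 * 4096-byte logical blocks on drives with 512-byte physical blocks,
 * map->phys_blk_shift would be 3, so disk_block and disk_block_cnt are
 * both scaled by 8 before the physical CDB is built above.
 */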
5415 * Submit commands down the "normal" RAID stack path.
5416 * All callers of hpsa_ciss_submit must check lockup_detected
5417 * beforehand: optionally before, and always after, calling cmd_alloc.
5419 static int hpsa_ciss_submit(struct ctlr_info *h,
5420 struct CommandList *c, struct scsi_cmnd *cmd,
5421 unsigned char scsi3addr[])
5423 cmd->host_scribble = (unsigned char *) c;
5424 c->cmd_type = CMD_SCSI;
5426 c->Header.ReplyQueue = 0; /* unused in simple mode */
5427 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
5428 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5430 /* Fill in the request block... */
5432 c->Request.Timeout = 0;
5433 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5434 c->Request.CDBLen = cmd->cmd_len;
5435 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5436 switch (cmd->sc_data_direction) {
5438 c->Request.type_attr_dir =
5439 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5441 case DMA_FROM_DEVICE:
5442 c->Request.type_attr_dir =
5443 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5446 c->Request.type_attr_dir =
5447 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5449 case DMA_BIDIRECTIONAL:
5450 /* This can happen if a buggy application does a scsi passthru
5451 * and sets both inlen and outlen to non-zero (see
5452 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command()).
5455 c->Request.type_attr_dir =
5456 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
5457 /* This is technically wrong, and hpsa controllers should
5458 * reject it with CMD_INVALID, which is the most correct
5459 * response, but non-fibre backends appear to let it
5460 * slide by, and give the same results as if this field
5461 * were set correctly. Either way is acceptable for
5462 * our purposes here.
5468 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5469 cmd->sc_data_direction);
5474 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
5475 hpsa_cmd_resolve_and_free(h, c);
5476 return SCSI_MLQUEUE_HOST_BUSY;
5478 enqueue_cmd_and_start_io(h, c);
5479 /* the cmd will come back via the interrupt handler in complete_scsi_command() */
5483 static void hpsa_cmd_init(struct ctlr_info *h, int index,
5484 struct CommandList *c)
5486 dma_addr_t cmd_dma_handle, err_dma_handle;
5488 /* Zero out all of the CommandList except the last field (refcount) */
5489 memset(c, 0, offsetof(struct CommandList, refcount));
5490 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5491 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5492 c->err_info = h->errinfo_pool + index;
5493 memset(c->err_info, 0, sizeof(*c->err_info));
5494 err_dma_handle = h->errinfo_pool_dhandle
5495 + index * sizeof(*c->err_info);
5496 c->cmdindex = index;
5497 c->busaddr = (u32) cmd_dma_handle;
5498 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5499 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5501 c->scsi_cmd = SCSI_CMD_IDLE;
5504 static void hpsa_preinitialize_commands(struct ctlr_info *h)
5508 for (i = 0; i < h->nr_cmds; i++) {
5509 struct CommandList *c = h->cmd_pool + i;
5511 hpsa_cmd_init(h, i, c);
5512 atomic_set(&c->refcount, 0);
5516 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5517 struct CommandList *c)
5519 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5521 BUG_ON(c->cmdindex != index);
5523 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5524 memset(c->err_info, 0, sizeof(*c->err_info));
5525 c->busaddr = (u32) cmd_dma_handle;
5528 static int hpsa_ioaccel_submit(struct ctlr_info *h,
5529 struct CommandList *c, struct scsi_cmnd *cmd,
5530 unsigned char *scsi3addr)
5532 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5533 int rc = IO_ACCEL_INELIGIBLE;
5536 return SCSI_MLQUEUE_HOST_BUSY;
5538 cmd->host_scribble = (unsigned char *) c;
5540 if (dev->offload_enabled) {
5541 hpsa_cmd_init(h, c->cmdindex, c);
5542 c->cmd_type = CMD_SCSI;
5544 rc = hpsa_scsi_ioaccel_raid_map(h, c);
5545 if (rc < 0) /* scsi_dma_map failed. */
5546 rc = SCSI_MLQUEUE_HOST_BUSY;
5547 } else if (dev->hba_ioaccel_enabled) {
5548 hpsa_cmd_init(h, c->cmdindex, c);
5549 c->cmd_type = CMD_SCSI;
5551 rc = hpsa_scsi_ioaccel_direct_map(h, c);
5552 if (rc < 0) /* scsi_dma_map failed. */
5553 rc = SCSI_MLQUEUE_HOST_BUSY;
5558 static void hpsa_command_resubmit_worker(struct work_struct *work)
5560 struct scsi_cmnd *cmd;
5561 struct hpsa_scsi_dev_t *dev;
5562 struct CommandList *c = container_of(work, struct CommandList, work);
5565 dev = cmd->device->hostdata;
5567 cmd->result = DID_NO_CONNECT << 16;
5568 return hpsa_cmd_free_and_done(c->h, c, cmd);
5570 if (c->reset_pending)
5571 return hpsa_cmd_free_and_done(c->h, c, cmd);
5572 if (c->cmd_type == CMD_IOACCEL2) {
5573 struct ctlr_info *h = c->h;
5574 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5577 if (c2->error_data.serv_response ==
5578 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5579 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
5582 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5584 * If we get here, it means dma mapping failed.
5585 * Try again via scsi mid layer, which will
5586 * then get SCSI_MLQUEUE_HOST_BUSY.
5588 cmd->result = DID_IMM_RETRY << 16;
5589 return hpsa_cmd_free_and_done(h, c, cmd);
5591 /* else, fall thru and resubmit down CISS path */
5594 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5595 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
5597 * If we get here, it means dma mapping failed. Try
5598 * again via scsi mid layer, which will then get
5599 * SCSI_MLQUEUE_HOST_BUSY.
5601 * hpsa_ciss_submit will have already freed c
5602 * if it encountered a dma mapping failure.
5604 cmd->result = DID_IMM_RETRY << 16;
5605 cmd->scsi_done(cmd);
5609 /* Running without struct Scsi_Host->host_lock held (lockless mode) */
5610 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5612 struct ctlr_info *h;
5613 struct hpsa_scsi_dev_t *dev;
5614 unsigned char scsi3addr[8];
5615 struct CommandList *c;
5618 /* Get the ptr to our adapter structure out of cmd->host. */
5619 h = sdev_to_hba(cmd->device);
5621 BUG_ON(cmd->request->tag < 0);
5623 dev = cmd->device->hostdata;
5625 cmd->result = DID_NO_CONNECT << 16;
5626 cmd->scsi_done(cmd);
5631 cmd->result = DID_NO_CONNECT << 16;
5632 cmd->scsi_done(cmd);
5636 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
5638 if (unlikely(lockup_detected(h))) {
5639 cmd->result = DID_NO_CONNECT << 16;
5640 cmd->scsi_done(cmd);
5643 c = cmd_tagged_alloc(h, cmd);
5646 * This is necessary because the SML doesn't zero out this field during error recovery.
5652 * Call alternate submit routine for I/O accelerated commands.
5653 * Retries always go down the normal I/O path.
5655 if (likely(cmd->retries == 0 &&
5656 !blk_rq_is_passthrough(cmd->request) &&
5657 h->acciopath_status)) {
5658 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
5661 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5662 hpsa_cmd_resolve_and_free(h, c);
5663 return SCSI_MLQUEUE_HOST_BUSY;
5666 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
5669 static void hpsa_scan_complete(struct ctlr_info *h)
5671 unsigned long flags;
5673 spin_lock_irqsave(&h->scan_lock, flags);
5674 h->scan_finished = 1;
5675 wake_up(&h->scan_wait_queue);
5676 spin_unlock_irqrestore(&h->scan_lock, flags);
5679 static void hpsa_scan_start(struct Scsi_Host *sh)
5681 struct ctlr_info *h = shost_to_hba(sh);
5682 unsigned long flags;
5685 * Don't let rescans be initiated on a controller known to be locked
5686 * up. If the controller locks up *during* a rescan, that thread is
5687 * probably hosed, but at least we can prevent new rescan threads from
5688 * piling up on a locked up controller.
5690 if (unlikely(lockup_detected(h)))
5691 return hpsa_scan_complete(h);
5694 * If a scan is already waiting to run, no need to add another
5696 spin_lock_irqsave(&h->scan_lock, flags);
5697 if (h->scan_waiting) {
5698 spin_unlock_irqrestore(&h->scan_lock, flags);
5702 spin_unlock_irqrestore(&h->scan_lock, flags);
5704 /* wait until any scan already in progress is finished. */
5706 spin_lock_irqsave(&h->scan_lock, flags);
5707 if (h->scan_finished)
5709 h->scan_waiting = 1;
5710 spin_unlock_irqrestore(&h->scan_lock, flags);
5711 wait_event(h->scan_wait_queue, h->scan_finished);
5712 /* Note: We don't need to worry about a race between this
5713 * thread and driver unload because the midlayer will
5714 * have incremented the reference count, so unload won't
5715 * happen if we're in here.
5718 h->scan_finished = 0; /* mark scan as in progress */
5719 h->scan_waiting = 0;
5720 spin_unlock_irqrestore(&h->scan_lock, flags);
5722 if (unlikely(lockup_detected(h)))
5723 return hpsa_scan_complete(h);
5726 * Do the scan after a reset completion
5728 spin_lock_irqsave(&h->reset_lock, flags);
5729 if (h->reset_in_progress) {
5730 h->drv_req_rescan = 1;
5731 spin_unlock_irqrestore(&h->reset_lock, flags);
5732 hpsa_scan_complete(h);
5735 spin_unlock_irqrestore(&h->reset_lock, flags);
5737 hpsa_update_scsi_devices(h);
5739 hpsa_scan_complete(h);
5742 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5744 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5751 else if (qdepth > logical_drive->queue_depth)
5752 qdepth = logical_drive->queue_depth;
5754 return scsi_change_queue_depth(sdev, qdepth);
5757 static int hpsa_scan_finished(struct Scsi_Host *sh,
5758 unsigned long elapsed_time)
5760 struct ctlr_info *h = shost_to_hba(sh);
5761 unsigned long flags;
5764 spin_lock_irqsave(&h->scan_lock, flags);
5765 finished = h->scan_finished;
5766 spin_unlock_irqrestore(&h->scan_lock, flags);
5770 static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5772 struct Scsi_Host *sh;
5774 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
5776 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5783 sh->max_channel = 3;
5784 sh->max_cmd_len = MAX_COMMAND_SIZE;
5785 sh->max_lun = HPSA_MAX_LUN;
5786 sh->max_id = HPSA_MAX_LUN;
5787 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5788 sh->cmd_per_lun = sh->can_queue;
5789 sh->sg_tablesize = h->maxsgentries;
5790 sh->transportt = hpsa_sas_transport_template;
5791 sh->hostdata[0] = (unsigned long) h;
5792 sh->irq = pci_irq_vector(h->pdev, 0);
5793 sh->unique_id = sh->irq;
5799 static int hpsa_scsi_add_host(struct ctlr_info *h)
5803 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5805 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5808 scsi_scan_host(h->scsi_host);
5813 * The block layer has already gone to the trouble of picking out a unique,
5814 * small-integer tag for this request. We use an offset from that value as
5815 * an index to select our command block. (The offset allows us to reserve the
5816 * low-numbered entries for our own uses.)
5818 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5820 int idx = scmd->request->tag;
5825 /* Offset to leave space for internal cmds. */
5826 return idx + HPSA_NRESERVED_CMDS;
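/*
 * For example, block-layer tag 0 selects command block
 * HPSA_NRESERVED_CMDS, tag 1 selects HPSA_NRESERVED_CMDS + 1, and so
 * on; indexes below the reserved count are never handed out here and
 * remain available for driver-internal commands.
 */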
5830 * Send a TEST_UNIT_READY command to the specified LUN using the specified
5831 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
5833 static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5834 struct CommandList *c, unsigned char lunaddr[],
5839 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5840 (void) fill_cmd(c, TEST_UNIT_READY, h,
5841 NULL, 0, 0, lunaddr, TYPE_CMD);
5842 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
5845 /* no unmap needed here because no data xfer. */
5847 /* Check if the unit is already ready. */
5848 if (c->err_info->CommandStatus == CMD_SUCCESS)
5852 * The first command sent after reset will receive "unit attention" to
5853 * indicate that the LUN has been reset...this is actually what we're
5854 * looking for (but, success is good too).
5856 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5857 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5858 (c->err_info->SenseInfo[2] == NO_SENSE ||
5859 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5866 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5867 * returns zero when the unit is ready, and non-zero when giving up.
5869 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5870 struct CommandList *c,
5871 unsigned char lunaddr[], int reply_queue)
5875 int waittime = 1; /* seconds */
5877 /* Send test unit ready until device ready, or give up. */
5878 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5881 * Wait for a bit. Do this first, because if we send
5882 * the TUR right away, the reset will just abort it.
5884 msleep(1000 * waittime);
5886 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5890 /* Increase wait time with each try, up to a point. */
5891 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5894 dev_warn(&h->pdev->dev,
5895 "waiting %d secs for device to become ready.\n",
5902 static int wait_for_device_to_become_ready(struct ctlr_info *h,
5903 unsigned char lunaddr[],
5910 struct CommandList *c;
5915 * If no specific reply queue was requested, then send the TUR
5916 * repeatedly, requesting a reply on each reply queue; otherwise execute
5917 * the loop exactly once using only the specified queue.
5919 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5921 last_queue = h->nreply_queues - 1;
5923 first_queue = reply_queue;
5924 last_queue = reply_queue;
5927 for (rq = first_queue; rq <= last_queue; rq++) {
5928 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5934 dev_warn(&h->pdev->dev, "giving up on device.\n");
5936 dev_warn(&h->pdev->dev, "device is ready.\n");
5942 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
5943 * complaining. Doing a host- or bus-reset can't do anything good here.
5945 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5948 struct ctlr_info *h;
5949 struct hpsa_scsi_dev_t *dev;
5952 unsigned long flags;
5954 /* find the controller to which the command that prompted the reset was sent */
5955 h = sdev_to_hba(scsicmd->device);
5956 if (h == NULL) /* paranoia */
5959 spin_lock_irqsave(&h->reset_lock, flags);
5960 h->reset_in_progress = 1;
5961 spin_unlock_irqrestore(&h->reset_lock, flags);
5963 if (lockup_detected(h)) {
5965 goto return_reset_status;
5968 dev = scsicmd->device->hostdata;
5970 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
5972 goto return_reset_status;
5975 if (dev->devtype == TYPE_ENCLOSURE) {
5977 goto return_reset_status;
5980 /* if controller locked up, we can guarantee command won't complete */
5981 if (lockup_detected(h)) {
5982 snprintf(msg, sizeof(msg),
5983 "cmd %d RESET FAILED, lockup detected",
5984 hpsa_get_cmd_index(scsicmd));
5985 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5987 goto return_reset_status;
5990 /* this reset request might be the result of a lockup; check */
5991 if (detect_controller_lockup(h)) {
5992 snprintf(msg, sizeof(msg),
5993 "cmd %d RESET FAILED, new lockup detected",
5994 hpsa_get_cmd_index(scsicmd));
5995 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5997 goto return_reset_status;
6000 /* Do not attempt a reset on the controller itself */
6001 if (is_hba_lunid(dev->scsi3addr)) {
6003 goto return_reset_status;
6006 if (is_logical_dev_addr_mode(dev->scsi3addr))
6007 reset_type = HPSA_DEVICE_RESET_MSG;
6009 reset_type = HPSA_PHYS_TARGET_RESET;
6011 sprintf(msg, "resetting %s",
6012 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
6013 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6015 /* send a reset to the SCSI LUN to which the command was sent */
6016 rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
6017 DEFAULT_REPLY_QUEUE);
6023 sprintf(msg, "reset %s %s",
6024 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
6025 rc == SUCCESS ? "completed successfully" : "failed");
6026 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6028 return_reset_status:
6029 spin_lock_irqsave(&h->reset_lock, flags);
6030 h->reset_in_progress = 0;
6031 spin_unlock_irqrestore(&h->reset_lock, flags);
6036 * For operations with an associated SCSI command, a command block is allocated
6037 * at init and selected by cmd_tagged_alloc() using the
6038 * block request tag as an index into a table of entries. cmd_tagged_free() is
6039 * the complement, although cmd_free() may be called instead.
6041 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6042 struct scsi_cmnd *scmd)
6044 int idx = hpsa_get_cmd_index(scmd);
6045 struct CommandList *c = h->cmd_pool + idx;
6047 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
6048 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
6049 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
6050 /* The index value comes from the block layer, so if it's out of
6051 * bounds, it's probably not our bug.
6056 atomic_inc(&c->refcount);
6057 if (unlikely(!hpsa_is_cmd_idle(c))) {
6059 * We expect that the SCSI layer will hand us a unique tag
6060 * value. Thus, there should never be a collision here between
6061 * two requests...because if the selected command isn't idle
6062 * then someone is going to be very disappointed.
6064 dev_err(&h->pdev->dev,
6065 "tag collision (tag=%d) in cmd_tagged_alloc().\n",
6067 if (c->scsi_cmd != NULL)
6068 scsi_print_command(c->scsi_cmd);
6069 scsi_print_command(scmd);
6072 hpsa_cmd_partial_init(h, idx, c);
6076 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
6079 * Release our reference to the block. We don't need to do anything
6080 * else to free it, because it is accessed by index.
6082 (void)atomic_dec(&c->refcount);
6086 * For operations that cannot sleep, a command block is allocated at init,
6087 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
6088 * which ones are free or in use. Lock must be held when calling this.
6089 * cmd_free() is the complement.
6090 * This function never gives up and never returns NULL. If it hangs,
6091 * another thread must call cmd_free() to free some tags.
6094 static struct CommandList *cmd_alloc(struct ctlr_info *h)
6096 struct CommandList *c;
6101 * There is some *extremely* small but non-zero chance that
6102 * multiple threads could get in here, and one thread could
6103 * be scanning through the list of bits looking for a free
6104 * one, but the free ones are always behind him, and other
6105 * threads sneak in behind him and eat them before he can
6106 * get to them, so that while there is always a free one, a
6107 * very unlucky thread might be starved anyway, never able to
6108 * beat the other threads. In reality, this happens so
6109 * infrequently as to be indistinguishable from never.
6111 * Note that we start allocating commands before the SCSI host structure
6112 * is initialized. Since the search starts at bit zero, this
6113 * all works, since we have at least one command structure available;
6114 * however, it means that the structures with the low indexes have to be
6115 * reserved for driver-initiated requests, while requests from the block
6116 * layer will use the higher indexes.
6120 i = find_next_zero_bit(h->cmd_pool_bits,
6121 HPSA_NRESERVED_CMDS,
6123 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6127 c = h->cmd_pool + i;
6128 refcount = atomic_inc_return(&c->refcount);
6129 if (unlikely(refcount > 1)) {
6130 cmd_free(h, c); /* already in use */
6131 offset = (i + 1) % HPSA_NRESERVED_CMDS;
6134 set_bit(i & (BITS_PER_LONG - 1),
6135 h->cmd_pool_bits + (i / BITS_PER_LONG));
6136 break; /* it's ours now. */
6138 hpsa_cmd_partial_init(h, i, c);
6143 * This is the complementary operation to cmd_alloc(). Note, however, in some
6144 * corner cases it may also be used to free blocks allocated by
6145 * cmd_tagged_alloc(), in which case the ref-count decrement does the trick and
6146 * the clear-bit is harmless.
6148 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6150 if (atomic_dec_and_test(&c->refcount)) {
6153 i = c - h->cmd_pool;
6154 clear_bit(i & (BITS_PER_LONG - 1),
6155 h->cmd_pool_bits + (i / BITS_PER_LONG));
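/*
 * Bitmap bookkeeping example: on a 64-bit build (BITS_PER_LONG == 64),
 * freeing the command at pool index 70 clears bit 70 & 63 = 6 in word
 * 70 / 64 = 1 of h->cmd_pool_bits, mirroring the set_bit() done in
 * cmd_alloc().
 */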
6159 #ifdef CONFIG_COMPAT
6161 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
6164 IOCTL32_Command_struct __user *arg32 =
6165 (IOCTL32_Command_struct __user *) arg;
6166 IOCTL_Command_struct arg64;
6167 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
6171 memset(&arg64, 0, sizeof(arg64));
6173 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6174 sizeof(arg64.LUN_info));
6175 err |= copy_from_user(&arg64.Request, &arg32->Request,
6176 sizeof(arg64.Request));
6177 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6178 sizeof(arg64.error_info));
6179 err |= get_user(arg64.buf_size, &arg32->buf_size);
6180 err |= get_user(cp, &arg32->buf);
6181 arg64.buf = compat_ptr(cp);
6182 err |= copy_to_user(p, &arg64, sizeof(arg64));
6187 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
6190 err |= copy_in_user(&arg32->error_info, &p->error_info,
6191 sizeof(arg32->error_info));
6197 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6198 int cmd, void __user *arg)
6200 BIG_IOCTL32_Command_struct __user *arg32 =
6201 (BIG_IOCTL32_Command_struct __user *) arg;
6202 BIG_IOCTL_Command_struct arg64;
6203 BIG_IOCTL_Command_struct __user *p =
6204 compat_alloc_user_space(sizeof(arg64));
6208 memset(&arg64, 0, sizeof(arg64));
6210 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6211 sizeof(arg64.LUN_info));
6212 err |= copy_from_user(&arg64.Request, &arg32->Request,
6213 sizeof(arg64.Request));
6214 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6215 sizeof(arg64.error_info));
6216 err |= get_user(arg64.buf_size, &arg32->buf_size);
6217 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
6218 err |= get_user(cp, &arg32->buf);
6219 arg64.buf = compat_ptr(cp);
6220 err |= copy_to_user(p, &arg64, sizeof(arg64));
6225 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
6228 err |= copy_in_user(&arg32->error_info, &p->error_info,
6229 sizeof(arg32->error_info));
6235 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6238 case CCISS_GETPCIINFO:
6239 case CCISS_GETINTINFO:
6240 case CCISS_SETINTINFO:
6241 case CCISS_GETNODENAME:
6242 case CCISS_SETNODENAME:
6243 case CCISS_GETHEARTBEAT:
6244 case CCISS_GETBUSTYPES:
6245 case CCISS_GETFIRMVER:
6246 case CCISS_GETDRIVVER:
6247 case CCISS_REVALIDVOLS:
6248 case CCISS_DEREGDISK:
6249 case CCISS_REGNEWDISK:
6251 case CCISS_RESCANDISK:
6252 case CCISS_GETLUNINFO:
6253 return hpsa_ioctl(dev, cmd, arg);
6255 case CCISS_PASSTHRU32:
6256 return hpsa_ioctl32_passthru(dev, cmd, arg);
6257 case CCISS_BIG_PASSTHRU32:
6258 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6261 return -ENOIOCTLCMD;
6266 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6268 struct hpsa_pci_info pciinfo;
6272 pciinfo.domain = pci_domain_nr(h->pdev->bus);
6273 pciinfo.bus = h->pdev->bus->number;
6274 pciinfo.dev_fn = h->pdev->devfn;
6275 pciinfo.board_id = h->board_id;
6276 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6281 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6283 DriverVer_type DriverVer;
6284 unsigned char vmaj, vmin, vsubmin;
6287 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6288 &vmaj, &vmin, &vsubmin);
6290 dev_info(&h->pdev->dev, "driver version string '%s' "
6291 "unrecognized.", HPSA_DRIVER_VERSION);
6296 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
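/*
 * e.g. a hypothetical version string "1.2.3" packs to
 * (1 << 16) | (2 << 8) | 3 = 0x010203; user space can unpack it
 * with (ver >> 16) & 0xff, (ver >> 8) & 0xff and ver & 0xff.
 */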
6299 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6304 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6306 IOCTL_Command_struct iocommand;
6307 struct CommandList *c;
6314 if (!capable(CAP_SYS_RAWIO))
6316 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6318 if ((iocommand.buf_size < 1) &&
6319 (iocommand.Request.Type.Direction != XFER_NONE)) {
6322 if (iocommand.buf_size > 0) {
6323 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
6326 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6327 /* Copy the data into the buffer we created */
6328 if (copy_from_user(buff, iocommand.buf,
6329 iocommand.buf_size)) {
6334 memset(buff, 0, iocommand.buf_size);
6339 /* Fill in the command type */
6340 c->cmd_type = CMD_IOCTL_PEND;
6341 c->scsi_cmd = SCSI_CMD_BUSY;
6342 /* Fill in Command Header */
6343 c->Header.ReplyQueue = 0; /* unused in simple mode */
6344 if (iocommand.buf_size > 0) { /* buffer to fill */
6345 c->Header.SGList = 1;
6346 c->Header.SGTotal = cpu_to_le16(1);
6347 } else { /* no buffers to fill */
6348 c->Header.SGList = 0;
6349 c->Header.SGTotal = cpu_to_le16(0);
6351 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
6353 /* Fill in Request block */
6354 memcpy(&c->Request, &iocommand.Request,
6355 sizeof(c->Request));
6357 /* Fill in the scatter gather information */
6358 if (iocommand.buf_size > 0) {
6359 temp64 = pci_map_single(h->pdev, buff,
6360 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
6361 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6362 c->SG[0].Addr = cpu_to_le64(0);
6363 c->SG[0].Len = cpu_to_le32(0);
6367 c->SG[0].Addr = cpu_to_le64(temp64);
6368 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6369 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
6371 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6373 if (iocommand.buf_size > 0)
6374 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
6375 check_ioctl_unit_attention(h, c);
6381 /* Copy the error information out */
6382 memcpy(&iocommand.error_info, c->err_info,
6383 sizeof(iocommand.error_info));
6384 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
6388 if ((iocommand.Request.Type.Direction & XFER_READ) &&
6389 iocommand.buf_size > 0) {
6390 /* Copy the data out of the buffer we created */
6391 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
6403 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6405 BIG_IOCTL_Command_struct *ioc;
6406 struct CommandList *c;
6407 unsigned char **buff = NULL;
6408 int *buff_size = NULL;
6414 BYTE __user *data_ptr;
6418 if (!capable(CAP_SYS_RAWIO))
6420 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
6425 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
6429 if ((ioc->buf_size < 1) &&
6430 (ioc->Request.Type.Direction != XFER_NONE)) {
6434 /* Check kmalloc limits using all SGs */
6435 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6439 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
6443 buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
6448 buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
6453 left = ioc->buf_size;
6454 data_ptr = ioc->buf;
6456 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6457 buff_size[sg_used] = sz;
6458 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6459 if (buff[sg_used] == NULL) {
6463 if (ioc->Request.Type.Direction & XFER_WRITE) {
6464 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6469 memset(buff[sg_used], 0, sz);
6476 c->cmd_type = CMD_IOCTL_PEND;
6477 c->scsi_cmd = SCSI_CMD_BUSY;
6478 c->Header.ReplyQueue = 0;
6479 c->Header.SGList = (u8) sg_used;
6480 c->Header.SGTotal = cpu_to_le16(sg_used);
6481 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6482 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6483 if (ioc->buf_size > 0) {
6485 for (i = 0; i < sg_used; i++) {
6486 temp64 = pci_map_single(h->pdev, buff[i],
6487 buff_size[i], PCI_DMA_BIDIRECTIONAL);
6488 if (dma_mapping_error(&h->pdev->dev,
6489 (dma_addr_t) temp64)) {
6490 c->SG[i].Addr = cpu_to_le64(0);
6491 c->SG[i].Len = cpu_to_le32(0);
6492 hpsa_pci_unmap(h->pdev, c, i,
6493 PCI_DMA_BIDIRECTIONAL);
6497 c->SG[i].Addr = cpu_to_le64(temp64);
6498 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6499 c->SG[i].Ext = cpu_to_le32(0);
6501 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6503 	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
6506 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
6507 check_ioctl_unit_attention(h, c);
6513 /* Copy the error information out */
6514 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6515 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
6519 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6522 /* Copy the data out of the buffer we created */
6523 BYTE __user *ptr = ioc->buf;
6524 for (i = 0; i < sg_used; i++) {
6525 if (copy_to_user(ptr, buff[i], buff_size[i])) {
6529 ptr += buff_size[i];
6539 for (i = 0; i < sg_used; i++)
6548 static void check_ioctl_unit_attention(struct ctlr_info *h,
6549 struct CommandList *c)
6551 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6552 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6553 (void) check_for_unit_attention(h, c);
6559 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6561 struct ctlr_info *h;
6562 void __user *argp = (void __user *)arg;
6565 h = sdev_to_hba(dev);
6568 case CCISS_DEREGDISK:
6569 case CCISS_REGNEWDISK:
6571 hpsa_scan_start(h->scsi_host);
6573 case CCISS_GETPCIINFO:
6574 return hpsa_getpciinfo_ioctl(h, argp);
6575 case CCISS_GETDRIVVER:
6576 return hpsa_getdrivver_ioctl(h, argp);
6577 case CCISS_PASSTHRU:
6578 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6580 rc = hpsa_passthru_ioctl(h, argp);
6581 atomic_inc(&h->passthru_cmds_avail);
6583 case CCISS_BIG_PASSTHRU:
6584 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6586 rc = hpsa_big_passthru_ioctl(h, argp);
6587 atomic_inc(&h->passthru_cmds_avail);
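/*
 * Note on the two passthru cases above: h->passthru_cmds_avail acts as a
 * counting semaphore that caps concurrent user passthru commands. The
 * pattern, in sketch form (-EAGAIN is how this driver reports an exhausted
 * quota):
 *
 *	if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
 *		return -EAGAIN;			(no slot free)
 *	rc = hpsa_passthru_ioctl(h, argp);
 *	atomic_inc(&h->passthru_cmds_avail);	(release the slot)
 *
 * atomic_dec_if_positive() never lets the counter go below zero, so a
 * failed attempt needs no matching atomic_inc().
 */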
6594 static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
6597 struct CommandList *c;
6601 /* fill_cmd can't fail here, no data buffer to map */
6602 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6603 RAID_CTLR_LUNID, TYPE_MSG);
6604 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6606 enqueue_cmd_and_start_io(h, c);
6607 /* Don't wait for completion, the reset won't complete. Don't free
6608 * the command either. This is the last command we will send before
6609 * re-initializing everything, so it doesn't matter and won't leak.
6614 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6615 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6618 int pci_dir = XFER_NONE;
6620 c->cmd_type = CMD_IOCTL_PEND;
6621 c->scsi_cmd = SCSI_CMD_BUSY;
6622 c->Header.ReplyQueue = 0;
6623 if (buff != NULL && size > 0) {
6624 c->Header.SGList = 1;
6625 c->Header.SGTotal = cpu_to_le16(1);
6627 c->Header.SGList = 0;
6628 c->Header.SGTotal = cpu_to_le16(0);
6630 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6632 if (cmd_type == TYPE_CMD) {
6635 /* are we trying to read a vital product page */
6636 if (page_code & VPD_PAGE) {
6637 c->Request.CDB[1] = 0x01;
6638 c->Request.CDB[2] = (page_code & 0xff);
6640 c->Request.CDBLen = 6;
6641 c->Request.type_attr_dir =
6642 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6643 c->Request.Timeout = 0;
6644 c->Request.CDB[0] = HPSA_INQUIRY;
6645 c->Request.CDB[4] = size & 0xFF;
6647 case RECEIVE_DIAGNOSTIC:
6648 c->Request.CDBLen = 6;
6649 c->Request.type_attr_dir =
6650 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6651 c->Request.Timeout = 0;
6652 c->Request.CDB[0] = cmd;
6653 c->Request.CDB[1] = 1;
6654 c->Request.CDB[2] = 1;
6655 c->Request.CDB[3] = (size >> 8) & 0xFF;
6656 c->Request.CDB[4] = size & 0xFF;
6658 case HPSA_REPORT_LOG:
6659 case HPSA_REPORT_PHYS:
6660 /* Talking to the controller, so it's a physical command:
6661    mode = 00, target = 0. Nothing to write.
6663 c->Request.CDBLen = 12;
6664 c->Request.type_attr_dir =
6665 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6666 c->Request.Timeout = 0;
6667 c->Request.CDB[0] = cmd;
6668 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6669 c->Request.CDB[7] = (size >> 16) & 0xFF;
6670 c->Request.CDB[8] = (size >> 8) & 0xFF;
6671 c->Request.CDB[9] = size & 0xFF;
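/*
 * CDB encoding note for the report cases above: SCSI CDB fields are
 * big-endian, so the shifts place the most significant byte of the
 * allocation length first. Example: size = 0x00012000 produces
 * CDB[6..9] = 00 01 20 00.
 */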
6673 case BMIC_SENSE_DIAG_OPTIONS:
6674 c->Request.CDBLen = 16;
6675 c->Request.type_attr_dir =
6676 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6677 c->Request.Timeout = 0;
6678 /* Spec says this should be BMIC_WRITE */
6679 c->Request.CDB[0] = BMIC_READ;
6680 c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6682 case BMIC_SET_DIAG_OPTIONS:
6683 c->Request.CDBLen = 16;
6684 c->Request.type_attr_dir =
6685 TYPE_ATTR_DIR(cmd_type,
6686 ATTR_SIMPLE, XFER_WRITE);
6687 c->Request.Timeout = 0;
6688 c->Request.CDB[0] = BMIC_WRITE;
6689 c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6691 case HPSA_CACHE_FLUSH:
6692 c->Request.CDBLen = 12;
6693 c->Request.type_attr_dir =
6694 TYPE_ATTR_DIR(cmd_type,
6695 ATTR_SIMPLE, XFER_WRITE);
6696 c->Request.Timeout = 0;
6697 c->Request.CDB[0] = BMIC_WRITE;
6698 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6699 c->Request.CDB[7] = (size >> 8) & 0xFF;
6700 c->Request.CDB[8] = size & 0xFF;
6702 case TEST_UNIT_READY:
6703 c->Request.CDBLen = 6;
6704 c->Request.type_attr_dir =
6705 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6706 c->Request.Timeout = 0;
6708 case HPSA_GET_RAID_MAP:
6709 c->Request.CDBLen = 12;
6710 c->Request.type_attr_dir =
6711 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6712 c->Request.Timeout = 0;
6713 c->Request.CDB[0] = HPSA_CISS_READ;
6714 c->Request.CDB[1] = cmd;
6715 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6716 c->Request.CDB[7] = (size >> 16) & 0xFF;
6717 c->Request.CDB[8] = (size >> 8) & 0xFF;
6718 c->Request.CDB[9] = size & 0xFF;
6720 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6721 c->Request.CDBLen = 10;
6722 c->Request.type_attr_dir =
6723 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6724 c->Request.Timeout = 0;
6725 c->Request.CDB[0] = BMIC_READ;
6726 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6727 c->Request.CDB[7] = (size >> 16) & 0xFF;
6728 c->Request.CDB[8] = (size >> 8) & 0xFF;
6730 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6731 c->Request.CDBLen = 10;
6732 c->Request.type_attr_dir =
6733 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6734 c->Request.Timeout = 0;
6735 c->Request.CDB[0] = BMIC_READ;
6736 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6737 c->Request.CDB[7] = (size >> 16) & 0xFF;
6738 c->Request.CDB[8] = (size >> 8) & 0xFF;
6740 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6741 c->Request.CDBLen = 10;
6742 c->Request.type_attr_dir =
6743 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6744 c->Request.Timeout = 0;
6745 c->Request.CDB[0] = BMIC_READ;
6746 c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6747 c->Request.CDB[7] = (size >> 16) & 0xFF;
6748 c->Request.CDB[8] = (size >> 8) & 0xFF;
6750 case BMIC_SENSE_STORAGE_BOX_PARAMS:
6751 c->Request.CDBLen = 10;
6752 c->Request.type_attr_dir =
6753 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6754 c->Request.Timeout = 0;
6755 c->Request.CDB[0] = BMIC_READ;
6756 c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
6757 c->Request.CDB[7] = (size >> 16) & 0xFF;
6758 c->Request.CDB[8] = (size >> 8) & 0xFF;
6760 case BMIC_IDENTIFY_CONTROLLER:
6761 c->Request.CDBLen = 10;
6762 c->Request.type_attr_dir =
6763 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6764 c->Request.Timeout = 0;
6765 c->Request.CDB[0] = BMIC_READ;
6766 c->Request.CDB[1] = 0;
6767 c->Request.CDB[2] = 0;
6768 c->Request.CDB[3] = 0;
6769 c->Request.CDB[4] = 0;
6770 c->Request.CDB[5] = 0;
6771 c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6772 c->Request.CDB[7] = (size >> 16) & 0xFF;
6773 c->Request.CDB[8] = (size >> 8) & 0xFF;
6774 c->Request.CDB[9] = 0;
6777 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6780 } else if (cmd_type == TYPE_MSG) {
6783 case HPSA_PHYS_TARGET_RESET:
6784 c->Request.CDBLen = 16;
6785 c->Request.type_attr_dir =
6786 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6787 c->Request.Timeout = 0; /* Don't time out */
6788 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6789 c->Request.CDB[0] = HPSA_RESET;
6790 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6791 /* Physical target reset needs no control bytes 4-7*/
6792 c->Request.CDB[4] = 0x00;
6793 c->Request.CDB[5] = 0x00;
6794 c->Request.CDB[6] = 0x00;
6795 c->Request.CDB[7] = 0x00;
6797 case HPSA_DEVICE_RESET_MSG:
6798 c->Request.CDBLen = 16;
6799 c->Request.type_attr_dir =
6800 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6801 c->Request.Timeout = 0; /* Don't time out */
6802 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6803 c->Request.CDB[0] = cmd;
6804 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6805 /* If bytes 4-7 are zero, it means reset the LunID device. */
6807 c->Request.CDB[4] = 0x00;
6808 c->Request.CDB[5] = 0x00;
6809 c->Request.CDB[6] = 0x00;
6810 c->Request.CDB[7] = 0x00;
6813 dev_warn(&h->pdev->dev, "unknown message type %d\n", cmd);
6818 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6822 switch (GET_DIR(c->Request.type_attr_dir)) {
6824 pci_dir = PCI_DMA_FROMDEVICE;
6827 pci_dir = PCI_DMA_TODEVICE;
6830 pci_dir = PCI_DMA_NONE;
6833 pci_dir = PCI_DMA_BIDIRECTIONAL;
6835 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
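/*
 * Sketch of a typical fill_cmd() call site elsewhere in this driver
 * (error handling trimmed; mirrors hpsa_scsi_do_inquiry()):
 *
 *	c = cmd_alloc(h);
 *	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, 0, scsi3addr,
 *			TYPE_CMD))
 *		goto out;
 *	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
 *			DEFAULT_TIMEOUT);
 *
 * fill_cmd() both builds the CDB and DMA-maps the single data buffer, so
 * callers only pick the opcode, buffer, and addressing.
 */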
6841 * Map (physical) PCI mem into (virtual) kernel space
6843 static void __iomem *remap_pci_mem(ulong base, ulong size)
6845 ulong page_base = ((ulong) base) & PAGE_MASK;
6846 ulong page_offs = ((ulong) base) - page_base;
6847 void __iomem *page_remapped = ioremap_nocache(page_base,
6850 return page_remapped ? (page_remapped + page_offs) : NULL;
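/*
 * Example: with 4 KiB pages, remap_pci_mem(0xfebf0250, 0x250) remaps the
 * page at 0xfebf0000 and returns a pointer 0x250 bytes into it, so the
 * caller sees exactly the BAR offset it asked for.
 */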
6853 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6855 return h->access.command_completed(h, q);
6858 static inline bool interrupt_pending(struct ctlr_info *h)
6860 return h->access.intr_pending(h);
6863 static inline long interrupt_not_for_us(struct ctlr_info *h)
6865 return (h->access.intr_pending(h) == 0) ||
6866 (h->interrupts_enabled == 0);
6869 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6872 if (unlikely(tag_index >= h->nr_cmds)) {
6873 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6879 static inline void finish_cmd(struct CommandList *c)
6881 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6882 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6883 || c->cmd_type == CMD_IOACCEL2))
6884 complete_scsi_command(c);
6885 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6886 complete(c->waiting);
6889 /* process completion of an indexed ("direct lookup") command */
6890 static inline void process_indexed_cmd(struct ctlr_info *h,
6894 struct CommandList *c;
6896 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6897 if (!bad_tag(h, tag_index, raw_tag)) {
6898 c = h->cmd_pool + tag_index;
6903 /* Some controllers, like p400, will give us one interrupt
6904 * after a soft reset, even if we turned interrupts off.
6905 * Only need to check for this in the hpsa_xxx_discard_completions
6908 static int ignore_bogus_interrupt(struct ctlr_info *h)
6910 if (likely(!reset_devices))
6913 if (likely(h->interrupts_enabled))
6916 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6917 	"(known firmware bug). Ignoring.\n");
6923 * Convert &h->q[x] (passed to interrupt handlers) back to h.
6924 * Relies on (h->q[x] == x) being true for x such that
6925 * 0 <= x < MAX_REPLY_QUEUES.
6927 static struct ctlr_info *queue_to_hba(u8 *queue)
6929 return container_of((queue - *queue), struct ctlr_info, q[0]);
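/*
 * Example: the interrupt setup path registers &h->q[i] with h->q[i] == i,
 * so for queue == &h->q[2], *queue == 2 and (queue - *queue) == &h->q[0];
 * container_of() then recovers the enclosing ctlr_info.
 */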
6932 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6934 struct ctlr_info *h = queue_to_hba(queue);
6935 u8 q = *(u8 *) queue;
6938 if (ignore_bogus_interrupt(h))
6941 if (interrupt_not_for_us(h))
6943 h->last_intr_timestamp = get_jiffies_64();
6944 while (interrupt_pending(h)) {
6945 raw_tag = get_next_completion(h, q);
6946 while (raw_tag != FIFO_EMPTY)
6947 raw_tag = next_command(h, q);
6952 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
6954 struct ctlr_info *h = queue_to_hba(queue);
6956 u8 q = *(u8 *) queue;
6958 if (ignore_bogus_interrupt(h))
6961 h->last_intr_timestamp = get_jiffies_64();
6962 raw_tag = get_next_completion(h, q);
6963 while (raw_tag != FIFO_EMPTY)
6964 raw_tag = next_command(h, q);
6968 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
6970 struct ctlr_info *h = queue_to_hba((u8 *) queue);
6972 u8 q = *(u8 *) queue;
6974 if (interrupt_not_for_us(h))
6976 h->last_intr_timestamp = get_jiffies_64();
6977 while (interrupt_pending(h)) {
6978 raw_tag = get_next_completion(h, q);
6979 while (raw_tag != FIFO_EMPTY) {
6980 process_indexed_cmd(h, raw_tag);
6981 raw_tag = next_command(h, q);
6987 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
6989 struct ctlr_info *h = queue_to_hba(queue);
6991 u8 q = *(u8 *) queue;
6993 h->last_intr_timestamp = get_jiffies_64();
6994 raw_tag = get_next_completion(h, q);
6995 while (raw_tag != FIFO_EMPTY) {
6996 process_indexed_cmd(h, raw_tag);
6997 raw_tag = next_command(h, q);
7002 /* Send a message CDB to the firmware. Careful, this only works
7003 * in simple mode, not performant mode due to the tag lookup.
7004 * We only ever use this immediately after a controller reset.
7006 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
7010 struct CommandListHeader CommandHeader;
7011 struct RequestBlock Request;
7012 struct ErrDescriptor ErrorDescriptor;
7014 struct Command *cmd;
7015 static const size_t cmd_sz = sizeof(*cmd) +
7016 sizeof(cmd->ErrorDescriptor);
7020 void __iomem *vaddr;
7023 vaddr = pci_ioremap_bar(pdev, 0);
7027 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
7028 * CCISS commands, so they must be allocated from the lower 4GiB of memory.
7031 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
7037 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
7043 /* This must fit, because of the 32-bit consistent DMA mask. Also,
7044 * although there's no guarantee, we assume that the address is at
7045 * least 4-byte aligned (most likely, it's page-aligned).
7047 paddr32 = cpu_to_le32(paddr64);
7049 cmd->CommandHeader.ReplyQueue = 0;
7050 cmd->CommandHeader.SGList = 0;
7051 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
7052 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
7053 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
7055 cmd->Request.CDBLen = 16;
7056 cmd->Request.type_attr_dir =
7057 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
7058 cmd->Request.Timeout = 0; /* Don't time out */
7059 cmd->Request.CDB[0] = opcode;
7060 cmd->Request.CDB[1] = type;
7061 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
7062 cmd->ErrorDescriptor.Addr =
7063 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
7064 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
7066 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
7068 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
7069 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
7070 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
7072 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
7077 /* we leak the DMA buffer here ... no choice since the controller could
7078 * still complete the command.
7080 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
7081 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
7086 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
7088 if (tag & HPSA_ERROR_BIT) {
7089 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
7094 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
7099 #define hpsa_noop(p) hpsa_message(p, 3, 0)
7101 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7102 void __iomem *vaddr, u32 use_doorbell)
7106 /* For everything after the P600, the PCI power state method
7107 * of resetting the controller doesn't work, so we have this
7108 * other way using the doorbell register.
7110 dev_info(&pdev->dev, "using doorbell to reset controller\n");
7111 writel(use_doorbell, vaddr + SA5_DOORBELL);
7113 /* PMC hardware guys tell us we need a 10 second delay after
7114 * doorbell reset and before any attempt to talk to the board
7115 * at all to ensure that this actually works and doesn't fall
7116 * over in some weird corner cases.
7119 } else { /* Try to do it the PCI power state way */
7121 /* Quoting from the Open CISS Specification: "The Power
7122 * Management Control/Status Register (CSR) controls the power
7123 * state of the device. The normal operating state is D0,
7124 * CSR=00h. The software off state is D3, CSR=03h. To reset
7125 * the controller, place the interface device in D3 then to D0,
7126 * this causes a secondary PCI reset which will reset the
7131 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7133 /* enter the D3hot power management state */
7134 rc = pci_set_power_state(pdev, PCI_D3hot);
7140 /* enter the D0 power management state */
7141 rc = pci_set_power_state(pdev, PCI_D0);
7146 * The P600 requires a small delay when changing states.
7147 * Otherwise we may think the board did not reset and we bail.
7148 * This is for kdump only and is particular to the P600.
7155 static void init_driver_version(char *driver_version, int len)
7157 memset(driver_version, 0, len);
7158 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7161 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7163 char *driver_version;
7164 int i, size = sizeof(cfgtable->driver_version);
7166 driver_version = kmalloc(size, GFP_KERNEL);
7167 if (!driver_version)
7170 init_driver_version(driver_version, size);
7171 for (i = 0; i < size; i++)
7172 writeb(driver_version[i], &cfgtable->driver_version[i]);
7173 kfree(driver_version);
7177 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7178 unsigned char *driver_ver)
7182 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7183 driver_ver[i] = readb(&cfgtable->driver_version[i]);
7186 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7189 char *driver_ver, *old_driver_ver;
7190 int rc, size = sizeof(cfgtable->driver_version);
7192 old_driver_ver = kmalloc_array(2, size, GFP_KERNEL);
7193 if (!old_driver_ver)
7195 driver_ver = old_driver_ver + size;
7197 /* After a reset, the 32 bytes of "driver version" in the cfgtable
7198 * should have been changed, otherwise we know the reset failed.
7200 init_driver_version(old_driver_ver, size);
7201 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7202 rc = !memcmp(driver_ver, old_driver_ver, size);
7203 kfree(old_driver_ver);
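/*
 * The handshake, in short: write_driver_ver_to_cfgtable() stamps
 * "hpsa <version>" into the config table before the reset; controller
 * firmware rewrites that field when it really reboots. If the bytes still
 * compare equal afterwards (rc != 0 here), the reset did not take.
 */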
7206 /* This does a hard reset of the controller using PCI power management
7207  * states or the doorbell register.
7209 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7213 u64 cfg_base_addr_index;
7214 void __iomem *vaddr;
7215 unsigned long paddr;
7216 u32 misc_fw_support;
7218 struct CfgTable __iomem *cfgtable;
7220 u16 command_register;
7222 /* For controllers as old as the P600, this is very nearly
7223  * the same thing as:
7225 * pci_save_state(pci_dev);
7226 * pci_set_power_state(pci_dev, PCI_D3hot);
7227 * pci_set_power_state(pci_dev, PCI_D0);
7228 * pci_restore_state(pci_dev);
7230 * For controllers newer than the P600, the pci power state
7231 * method of resetting doesn't work, so we have another way
7232 * using the doorbell register.
7235 if (!ctlr_is_resettable(board_id)) {
7236 dev_warn(&pdev->dev, "Controller not resettable\n");
7240 /* if controller is soft- but not hard resettable... */
7241 if (!ctlr_is_hard_resettable(board_id))
7242 return -ENOTSUPP; /* try soft reset later. */
7244 /* Save the PCI command register */
7245 pci_read_config_word(pdev, 4, &command_register);
7246 pci_save_state(pdev);
7248 /* find the first memory BAR, so we can find the cfg table */
7249 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7252 vaddr = remap_pci_mem(paddr, 0x250);
7256 /* find cfgtable in order to check if reset via doorbell is supported */
7257 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7258 &cfg_base_addr_index, &cfg_offset);
7261 cfgtable = remap_pci_mem(pci_resource_start(pdev,
7262 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7267 rc = write_driver_ver_to_cfgtable(cfgtable);
7269 goto unmap_cfgtable;
7271 /* If reset via doorbell register is supported, use that.
7272 * There are two such methods. Favor the newest method.
7274 misc_fw_support = readl(&cfgtable->misc_fw_support);
7275 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7277 use_doorbell = DOORBELL_CTLR_RESET2;
7279 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7281 dev_warn(&pdev->dev,
7282 "Soft reset not supported. Firmware update is required.\n");
7283 rc = -ENOTSUPP; /* try soft reset */
7284 goto unmap_cfgtable;
7288 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7290 goto unmap_cfgtable;
7292 pci_restore_state(pdev);
7293 pci_write_config_word(pdev, 4, command_register);
7295 /* Some devices (notably the HP Smart Array 5i Controller)
7296 need a little pause here */
7297 msleep(HPSA_POST_RESET_PAUSE_MSECS);
7299 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7301 dev_warn(&pdev->dev,
7302 "Failed waiting for board to become ready after hard reset\n");
7303 goto unmap_cfgtable;
7306 rc = controller_reset_failed(vaddr);
7308 goto unmap_cfgtable;
7310 dev_warn(&pdev->dev, "Unable to successfully reset "
7311 "controller. Will try soft reset.\n");
7314 dev_info(&pdev->dev, "board ready after hard reset.\n");
7326 * We cannot read the structure directly; for portability we must use
7327 * the readb()/readl() I/O accessors instead.
7328 * This is for debug only.
7330 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7336 dev_info(dev, "Controller Configuration information\n");
7337 dev_info(dev, "------------------------------------\n");
7338 for (i = 0; i < 4; i++)
7339 temp_name[i] = readb(&(tb->Signature[i]));
7340 temp_name[4] = '\0';
7341 dev_info(dev, " Signature = %s\n", temp_name);
7342 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
7343 dev_info(dev, " Transport methods supported = 0x%x\n",
7344 readl(&(tb->TransportSupport)));
7345 dev_info(dev, " Transport methods active = 0x%x\n",
7346 readl(&(tb->TransportActive)));
7347 dev_info(dev, " Requested transport Method = 0x%x\n",
7348 readl(&(tb->HostWrite.TransportRequest)));
7349 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
7350 readl(&(tb->HostWrite.CoalIntDelay)));
7351 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
7352 readl(&(tb->HostWrite.CoalIntCount)));
7353 dev_info(dev, " Max outstanding commands = %d\n",
7354 readl(&(tb->CmdsOutMax)));
7355 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7356 for (i = 0; i < 16; i++)
7357 temp_name[i] = readb(&(tb->ServerName[i]));
7358 temp_name[16] = '\0';
7359 dev_info(dev, " Server Name = %s\n", temp_name);
7360 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
7361 readl(&(tb->HeartBeat)));
7362 #endif /* HPSA_DEBUG */
7365 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7367 int i, offset, mem_type, bar_type;
7369 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
7372 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7373 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7374 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7377 mem_type = pci_resource_flags(pdev, i) &
7378 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7380 case PCI_BASE_ADDRESS_MEM_TYPE_32:
7381 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7382 offset += 4; /* 32 bit */
7384 case PCI_BASE_ADDRESS_MEM_TYPE_64:
7387 default: /* reserved in PCI 2.2 */
7388 dev_warn(&pdev->dev,
7389 "base address is invalid\n");
7394 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7400 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7402 pci_free_irq_vectors(h->pdev);
7403 h->msix_vectors = 0;
7406 static void hpsa_setup_reply_map(struct ctlr_info *h)
7408 const struct cpumask *mask;
7409 unsigned int queue, cpu;
7411 for (queue = 0; queue < h->msix_vectors; queue++) {
7412 mask = pci_irq_get_affinity(h->pdev, queue);
7416 for_each_cpu(cpu, mask)
7417 h->reply_map[cpu] = queue;
7422 for_each_possible_cpu(cpu)
7423 h->reply_map[cpu] = 0;
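/*
 * Sketch of how the map built above is meant to be consumed on the
 * submission side (illustrative; it picks the reply queue whose MSI-X
 * affinity covers the submitting CPU):
 *
 *	queue = h->reply_map[raw_smp_processor_id()];
 *
 * The fallback loop fills every CPU with queue 0 when affinity masks are
 * unavailable, so the lookup is always valid.
 */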
7426 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
7427 * controllers that are capable. If not, we use legacy INTx mode.
7429 static int hpsa_interrupt_mode(struct ctlr_info *h)
7431 unsigned int flags = PCI_IRQ_LEGACY;
7434 /* Some boards advertise MSI but don't really support it */
7435 switch (h->board_id) {
7442 ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
7443 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
7445 h->msix_vectors = ret;
7449 flags |= PCI_IRQ_MSI;
7453 ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
7459 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
7463 u32 subsystem_vendor_id, subsystem_device_id;
7465 subsystem_vendor_id = pdev->subsystem_vendor;
7466 subsystem_device_id = pdev->subsystem_device;
7467 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7468 subsystem_vendor_id;
7471 *legacy_board = false;
7472 for (i = 0; i < ARRAY_SIZE(products); i++)
7473 if (*board_id == products[i].board_id) {
7474 if (products[i].access != &SA5A_access &&
7475 products[i].access != &SA5B_access)
7477 dev_warn(&pdev->dev,
7478 "legacy board ID: 0x%08x\n",
7481 *legacy_board = true;
7485 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
7487 *legacy_board = true;
7488 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
7491 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7492 unsigned long *memory_bar)
7496 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7497 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7498 /* addressing mode bits already removed */
7499 *memory_bar = pci_resource_start(pdev, i);
7500 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7504 dev_warn(&pdev->dev, "no memory BAR found\n");
7508 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7514 iterations = HPSA_BOARD_READY_ITERATIONS;
7516 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7518 for (i = 0; i < iterations; i++) {
7519 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7520 if (wait_for_ready) {
7521 if (scratchpad == HPSA_FIRMWARE_READY)
7524 if (scratchpad != HPSA_FIRMWARE_READY)
7527 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7529 dev_warn(&pdev->dev, "board not ready, timed out.\n");
7533 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7534 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7537 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7538 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7539 *cfg_base_addr &= (u32) 0x0000ffff;
7540 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7541 if (*cfg_base_addr_index == -1) {
7542 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7548 static void hpsa_free_cfgtables(struct ctlr_info *h)
7550 if (h->transtable) {
7551 iounmap(h->transtable);
7552 h->transtable = NULL;
7555 iounmap(h->cfgtable);
7560 /* Find and map CISS config table and transfer table
7561  * several items must be unmapped (freed) later
7563 static int hpsa_find_cfgtables(struct ctlr_info *h)
7567 u64 cfg_base_addr_index;
7571 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7572 &cfg_base_addr_index, &cfg_offset);
7575 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7576 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7578 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7581 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7584 /* Find performant mode table. */
7585 trans_offset = readl(&h->cfgtable->TransMethodOffset);
7586 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7587 cfg_base_addr_index)+cfg_offset+trans_offset,
7588 sizeof(*h->transtable));
7589 if (!h->transtable) {
7590 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7591 hpsa_free_cfgtables(h);
7597 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7599 #define MIN_MAX_COMMANDS 16
7600 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7602 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7604 /* Limit commands in memory-limited kdump scenario. */
7605 if (reset_devices && h->max_commands > 32)
7606 h->max_commands = 32;
7608 if (h->max_commands < MIN_MAX_COMMANDS) {
7609 dev_warn(&h->pdev->dev,
7610 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
7613 h->max_commands = MIN_MAX_COMMANDS;
7617 /* If the controller reports that the total max sg entries is greater than 512,
7618 * then we know that chained SG blocks work. (Original smart arrays did not
7619 * support chained SG blocks and would return zero for max sg entries.)
7621 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7623 return h->maxsgentries > 512;
7626 /* Interrogate the hardware for some limits:
7627 * max commands, max SG elements without chaining, and with chaining,
7628 * SG chain block size, etc.
7630 static void hpsa_find_board_params(struct ctlr_info *h)
7632 hpsa_get_max_perf_mode_cmds(h);
7633 h->nr_cmds = h->max_commands;
7634 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7635 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7636 if (hpsa_supports_chained_sg_blocks(h)) {
7637 /* Limit in-command s/g elements to 32 to save dma'able memory. */
7638 h->max_cmd_sg_entries = 32;
7639 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7640 h->maxsgentries--; /* save one for chain pointer */
7643 * Original smart arrays supported at most 31 s/g entries
7644 * embedded inline in the command (trying to use more
7645 * would lock up the controller)
7647 h->max_cmd_sg_entries = 31;
7648 h->maxsgentries = 31; /* default to traditional values */
7652 /* Find out what task management functions are supported and cache */
7653 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7654 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7655 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7656 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7657 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7658 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7659 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
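/*
 * Worked example of the SG sizing above: a controller reporting
 * MaxScatterGatherElements = 2048 ends up with max_cmd_sg_entries = 32
 * inline descriptors, chainsize = 2016 entries in the chain block, and
 * maxsgentries = 2047 after one slot is reserved for the chain pointer.
 */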
7662 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7664 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7665 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7671 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7675 driver_support = readl(&(h->cfgtable->driver_support));
7676 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7678 driver_support |= ENABLE_SCSI_PREFETCH;
7680 driver_support |= ENABLE_UNIT_ATTN;
7681 writel(driver_support, &(h->cfgtable->driver_support));
7684 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
7685 * in a prefetch beyond physical memory.
7687 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7691 if (h->board_id != 0x3225103C)
7693 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7694 dma_prefetch |= 0x8000;
7695 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7698 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7702 unsigned long flags;
7703 /* wait until the clear_event_notify bit 6 is cleared by controller. */
7704 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7705 spin_lock_irqsave(&h->lock, flags);
7706 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7707 spin_unlock_irqrestore(&h->lock, flags);
7708 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7710 /* delay and try again */
7711 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7718 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7722 unsigned long flags;
7724 /* under certain very rare conditions, this can take a while.
7725 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7726 * as we enter this code.)
7728 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7729 if (h->remove_in_progress)
7731 spin_lock_irqsave(&h->lock, flags);
7732 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7733 spin_unlock_irqrestore(&h->lock, flags);
7734 if (!(doorbell_value & CFGTBL_ChangeReq))
7736 /* delay and try again */
7737 msleep(MODE_CHANGE_WAIT_INTERVAL);
7744 /* return -ENODEV or other reason on error, 0 on success */
7745 static int hpsa_enter_simple_mode(struct ctlr_info *h)
7749 trans_support = readl(&(h->cfgtable->TransportSupport));
7750 if (!(trans_support & SIMPLE_MODE))
7753 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7755 /* Update the field, and then ring the doorbell */
7756 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7757 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7758 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7759 if (hpsa_wait_for_mode_change_ack(h))
7761 print_cfg_table(&h->pdev->dev, h->cfgtable);
7762 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7764 h->transMethod = CFGTBL_Trans_Simple;
7767 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
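/*
 * The transition above follows the standard CISS handshake: request
 * CFGTBL_Trans_Simple in the host-write section, ring SA5_DOORBELL with
 * CFGTBL_ChangeReq, poll until the controller clears the request bit
 * (hpsa_wait_for_mode_change_ack), then confirm that TransportActive
 * really reports simple mode before trusting it.
 */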
7771 /* free items allocated or mapped by hpsa_pci_init */
7772 static void hpsa_free_pci_init(struct ctlr_info *h)
7774 hpsa_free_cfgtables(h); /* pci_init 4 */
7775 iounmap(h->vaddr); /* pci_init 3 */
7777 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
7779 * call pci_disable_device before pci_release_regions per
7780 * Documentation/PCI/pci.txt
7782 pci_disable_device(h->pdev); /* pci_init 1 */
7783 pci_release_regions(h->pdev); /* pci_init 2 */
7786 /* several items must be freed later */
7787 static int hpsa_pci_init(struct ctlr_info *h)
7789 int prod_index, err;
7792 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
7795 h->product_name = products[prod_index].product_name;
7796 h->access = *(products[prod_index].access);
7797 h->legacy_board = legacy_board;
7798 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7799 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7801 err = pci_enable_device(h->pdev);
7803 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7804 pci_disable_device(h->pdev);
7808 err = pci_request_regions(h->pdev, HPSA);
7810 dev_err(&h->pdev->dev,
7811 "failed to obtain PCI resources\n");
7812 pci_disable_device(h->pdev);
7816 pci_set_master(h->pdev);
7818 err = hpsa_interrupt_mode(h);
7822 /* setup mapping between CPU and reply queue */
7823 hpsa_setup_reply_map(h);
7825 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7827 goto clean2; /* intmode+region, pci */
7828 h->vaddr = remap_pci_mem(h->paddr, 0x250);
7830 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7832 goto clean2; /* intmode+region, pci */
7834 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7836 goto clean3; /* vaddr, intmode+region, pci */
7837 err = hpsa_find_cfgtables(h);
7839 goto clean3; /* vaddr, intmode+region, pci */
7840 hpsa_find_board_params(h);
7842 if (!hpsa_CISS_signature_present(h)) {
7844 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
7846 hpsa_set_driver_support_bits(h);
7847 hpsa_p600_dma_prefetch_quirk(h);
7848 err = hpsa_enter_simple_mode(h);
7850 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
7853 clean4: /* cfgtables, vaddr, intmode+region, pci */
7854 hpsa_free_cfgtables(h);
7855 clean3: /* vaddr, intmode+region, pci */
7858 clean2: /* intmode+region, pci */
7859 hpsa_disable_interrupt_mode(h);
7862 * call pci_disable_device before pci_release_regions per
7863 * Documentation/PCI/pci.txt
7865 pci_disable_device(h->pdev);
7866 pci_release_regions(h->pdev);
7870 static void hpsa_hba_inquiry(struct ctlr_info *h)
7874 #define HBA_INQUIRY_BYTE_COUNT 64
7875 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7876 if (!h->hba_inquiry_data)
7878 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7879 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7881 kfree(h->hba_inquiry_data);
7882 h->hba_inquiry_data = NULL;
7886 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7889 void __iomem *vaddr;
7894 /* kdump kernel is loading, we don't know in which state the
7895  * PCI interface is. The dev->enable_cnt is equal to zero,
7896  * so we call enable+disable, wait a while and switch it on.
7898 rc = pci_enable_device(pdev);
7900 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7903 pci_disable_device(pdev);
7904 msleep(260); /* a randomly chosen number */
7905 rc = pci_enable_device(pdev);
7907 dev_warn(&pdev->dev, "failed to enable device.\n");
7911 pci_set_master(pdev);
7913 vaddr = pci_ioremap_bar(pdev, 0);
7914 if (vaddr == NULL) {
7918 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7921 /* Reset the controller with a PCI power-cycle or via doorbell */
7922 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
7924 /* -ENOTSUPP here means we cannot reset the controller
7925 * but it's already (and still) up and running in
7926 * "performant mode". Or, it might be 640x, which can't reset
7927 * due to concerns about shared bbwc between 6402/6404 pair.
7932 /* Now try to get the controller to respond to a no-op */
7933 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
7934 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7935 if (hpsa_noop(pdev) == 0)
7938 dev_warn(&pdev->dev, "no-op failed%s\n",
7939 (i < 11 ? "; re-trying" : ""));
7944 pci_disable_device(pdev);
7948 static void hpsa_free_cmd_pool(struct ctlr_info *h)
7950 kfree(h->cmd_pool_bits);
7951 h->cmd_pool_bits = NULL;
7953 pci_free_consistent(h->pdev,
7954 h->nr_cmds * sizeof(struct CommandList),
7956 h->cmd_pool_dhandle);
7958 h->cmd_pool_dhandle = 0;
7960 if (h->errinfo_pool) {
7961 pci_free_consistent(h->pdev,
7962 h->nr_cmds * sizeof(struct ErrorInfo),
7964 h->errinfo_pool_dhandle);
7965 h->errinfo_pool = NULL;
7966 h->errinfo_pool_dhandle = 0;
7970 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
7972 h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
7973 sizeof(unsigned long),
7975 h->cmd_pool = pci_alloc_consistent(h->pdev,
7976 h->nr_cmds * sizeof(*h->cmd_pool),
7977 &(h->cmd_pool_dhandle));
7978 h->errinfo_pool = pci_alloc_consistent(h->pdev,
7979 h->nr_cmds * sizeof(*h->errinfo_pool),
7980 &(h->errinfo_pool_dhandle));
7981 if ((h->cmd_pool_bits == NULL)
7982 || (h->cmd_pool == NULL)
7983 || (h->errinfo_pool == NULL)) {
7984 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
7987 hpsa_preinitialize_commands(h);
7990 hpsa_free_cmd_pool(h);
7994 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7995 static void hpsa_free_irqs(struct ctlr_info *h)
7999 if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
8000 /* Single reply queue, only one irq to free */
8001 free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
8002 h->q[h->intr_mode] = 0;
8006 for (i = 0; i < h->msix_vectors; i++) {
8007 free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
8010 for (; i < MAX_REPLY_QUEUES; i++)
8014 /* returns 0 on success; cleans up and returns -Enn on error */
8015 static int hpsa_request_irqs(struct ctlr_info *h,
8016 irqreturn_t (*msixhandler)(int, void *),
8017 irqreturn_t (*intxhandler)(int, void *))
8022 * initialize h->q[x] = x so that interrupt handlers know which
8025 for (i = 0; i < MAX_REPLY_QUEUES; i++)
8028 if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
8029 /* If performant mode and MSI-X, use multiple reply queues */
8030 for (i = 0; i < h->msix_vectors; i++) {
8031 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
8032 rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
8038 dev_err(&h->pdev->dev,
8039 "failed to get irq %d for %s\n",
8040 pci_irq_vector(h->pdev, i), h->devname);
8041 for (j = 0; j < i; j++) {
8042 free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
8045 for (; j < MAX_REPLY_QUEUES; j++)
8051 /* Use single reply pool */
8052 if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
8053 sprintf(h->intrname[0], "%s-msi%s", h->devname,
8054 h->msix_vectors ? "x" : "");
8055 rc = request_irq(pci_irq_vector(h->pdev, 0),
8058 &h->q[h->intr_mode]);
8060 sprintf(h->intrname[h->intr_mode],
8061 "%s-intx", h->devname);
8062 rc = request_irq(pci_irq_vector(h->pdev, 0),
8063 intxhandler, IRQF_SHARED,
8065 &h->q[h->intr_mode]);
8069 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8070 pci_irq_vector(h->pdev, 0), h->devname);
8077 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
8080 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
8082 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
8083 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
8085 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8089 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8090 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8092 dev_warn(&h->pdev->dev, "Board failed to become ready "
8093 "after soft reset.\n");
8100 static void hpsa_free_reply_queues(struct ctlr_info *h)
8104 for (i = 0; i < h->nreply_queues; i++) {
8105 if (!h->reply_queue[i].head)
8107 pci_free_consistent(h->pdev,
8108 h->reply_queue_size,
8109 h->reply_queue[i].head,
8110 h->reply_queue[i].busaddr);
8111 h->reply_queue[i].head = NULL;
8112 h->reply_queue[i].busaddr = 0;
8114 h->reply_queue_size = 0;
8117 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8119 hpsa_free_performant_mode(h); /* init_one 7 */
8120 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
8121 hpsa_free_cmd_pool(h); /* init_one 5 */
8122 hpsa_free_irqs(h); /* init_one 4 */
8123 scsi_host_put(h->scsi_host); /* init_one 3 */
8124 h->scsi_host = NULL; /* init_one 3 */
8125 hpsa_free_pci_init(h); /* init_one 2_5 */
8126 free_percpu(h->lockup_detected); /* init_one 2 */
8127 h->lockup_detected = NULL; /* init_one 2 */
8128 if (h->resubmit_wq) {
8129 destroy_workqueue(h->resubmit_wq); /* init_one 1 */
8130 h->resubmit_wq = NULL;
8132 if (h->rescan_ctlr_wq) {
8133 destroy_workqueue(h->rescan_ctlr_wq);
8134 h->rescan_ctlr_wq = NULL;
8136 if (h->monitor_ctlr_wq) {
8137 destroy_workqueue(h->monitor_ctlr_wq);
8138 h->monitor_ctlr_wq = NULL;
8141 kfree(h); /* init_one 1 */
8144 /* Called when controller lockup detected. */
8145 static void fail_all_outstanding_cmds(struct ctlr_info *h)
8148 struct CommandList *c;
8151 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
8152 for (i = 0; i < h->nr_cmds; i++) {
8153 c = h->cmd_pool + i;
8154 refcount = atomic_inc_return(&c->refcount);
8156 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8158 atomic_dec(&h->commands_outstanding);
8163 dev_warn(&h->pdev->dev,
8164 "failed %d commands in fail_all\n", failcount);
8167 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8171 for_each_online_cpu(cpu) {
8172 u32 *lockup_detected;
8173 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8174 *lockup_detected = value;
8176 wmb(); /* be sure the per-cpu variables are out to memory */
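/*
 * The matching reader, in sketch form (each CPU checks only its own copy,
 * avoiding cross-CPU cache traffic on the hot path):
 *
 *	cpu = get_cpu();
 *	detected = *per_cpu_ptr(h->lockup_detected, cpu);
 *	put_cpu();
 */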
8179 static void controller_lockup_detected(struct ctlr_info *h)
8181 unsigned long flags;
8182 u32 lockup_detected;
8184 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8185 spin_lock_irqsave(&h->lock, flags);
8186 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8187 if (!lockup_detected) {
8188 /* no heartbeat, but controller gave us a zero. */
8189 dev_warn(&h->pdev->dev,
8190 "lockup detected after %d but scratchpad register is zero\n",
8191 h->heartbeat_sample_interval / HZ);
8192 lockup_detected = 0xffffffff;
8194 set_lockup_detected_for_all_cpus(h, lockup_detected);
8195 spin_unlock_irqrestore(&h->lock, flags);
8196 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
8197 lockup_detected, h->heartbeat_sample_interval / HZ);
8198 if (lockup_detected == 0xffff0000) {
8199 dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
8200 writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
8202 pci_disable_device(h->pdev);
8203 fail_all_outstanding_cmds(h);
8206 static int detect_controller_lockup(struct ctlr_info *h)
8210 unsigned long flags;
8212 now = get_jiffies_64();
8213 /* If we've received an interrupt recently, we're ok. */
8214 if (time_after64(h->last_intr_timestamp +
8215 (h->heartbeat_sample_interval), now))
8219 * If we've already checked the heartbeat recently, we're ok.
8220 * This could happen if someone sends us a signal. We
8221 * otherwise don't care about signals in this thread.
8223 if (time_after64(h->last_heartbeat_timestamp +
8224 (h->heartbeat_sample_interval), now))
8227 /* If heartbeat has not changed since we last looked, we're not ok. */
8228 spin_lock_irqsave(&h->lock, flags);
8229 heartbeat = readl(&h->cfgtable->HeartBeat);
8230 spin_unlock_irqrestore(&h->lock, flags);
8231 if (h->last_heartbeat == heartbeat) {
8232 controller_lockup_detected(h);
8237 h->last_heartbeat = heartbeat;
8238 h->last_heartbeat_timestamp = now;
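/*
 * To summarize the checks above: a lockup is declared only when a full
 * heartbeat_sample_interval has elapsed with no interrupt, no recent
 * heartbeat sample, and no movement in the cfgtable HeartBeat counter;
 * anything less just refreshes the timestamps and returns.
 */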
8243 * Set ioaccel status for all ioaccel volumes.
8245 * Called from monitor controller worker (hpsa_event_monitor_worker)
8247 * A Volume (or Volumes that comprise an Array set) may be undergoing a
8248 * transformation, so we will be turning off ioaccel for all volumes that
8249 * make up the Array.
8251 static void hpsa_set_ioaccel_status(struct ctlr_info *h)
8257 struct hpsa_scsi_dev_t *device;
8262 buf = kmalloc(64, GFP_KERNEL);
8267 * Run through current device list used during I/O requests.
8269 for (i = 0; i < h->ndevices; i++) {
8270 int offload_to_be_enabled = 0;
8271 int offload_config = 0;
8277 if (!hpsa_vpd_page_supported(h, device->scsi3addr,
8278 HPSA_VPD_LV_IOACCEL_STATUS))
8283 rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
8284 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
8289 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
8292 * Check if offload is still configured on
8295 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
8297 * If offload is configured on, check to see if ioaccel
8298 * needs to be enabled.
8301 offload_to_be_enabled =
8302 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
8305 * If ioaccel is to be re-enabled, re-enable later during the
8306 * scan operation so the driver can get a fresh raidmap
8307 * before turning ioaccel back on.
8309 if (offload_to_be_enabled)
8313 * Immediately turn off ioaccel for any volume the
8314 * controller tells us to. Some of the reasons could be:
8315 * transformation - change to the LVs of an Array.
8316 * degraded volume - component failure
8318 hpsa_turn_off_ioaccel_for_device(device);
8324 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8328 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8331 /* Ask the controller to clear the events we're handling. */
8332 if ((h->transMethod & (CFGTBL_Trans_io_accel1
8333 | CFGTBL_Trans_io_accel2)) &&
8334 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8335 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8337 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8338 event_type = "state change";
8339 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8340 event_type = "configuration change";
8341 /* Stop sending new RAID offload reqs via the IO accelerator */
8342 scsi_block_requests(h->scsi_host);
8343 hpsa_set_ioaccel_status(h);
8344 hpsa_drain_accel_commands(h);
8345 /* Set 'accelerator path config change' bit */
8346 dev_warn(&h->pdev->dev,
8347 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8348 h->events, event_type);
8349 writel(h->events, &(h->cfgtable->clear_event_notify));
8350 /* Set the "clear event notify field update" bit 6 */
8351 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8352 /* Wait until ctlr clears 'clear event notify field', bit 6 */
8353 hpsa_wait_for_clear_event_notify_ack(h);
8354 scsi_unblock_requests(h->scsi_host);
8356 /* Acknowledge controller notification events. */
8357 writel(h->events, &(h->cfgtable->clear_event_notify));
8358 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8359 hpsa_wait_for_clear_event_notify_ack(h);
8364 /* Check a register on the controller to see if there are configuration
8365 * changes (added/changed/removed logical drives, etc.) which mean that
8366 * we should rescan the controller for devices.
8367 * Also check flag for driver-initiated rescan.
8369 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8371 if (h->drv_req_rescan) {
8372 h->drv_req_rescan = 0;
8376 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8379 h->events = readl(&(h->cfgtable->event_notify));
8380 return h->events & RESCAN_REQUIRED_EVENT_BITS;
8384 * Check if any of the offline devices have become ready
8386 static int hpsa_offline_devices_ready(struct ctlr_info *h)
8388 unsigned long flags;
8389 struct offline_device_entry *d;
8390 struct list_head *this, *tmp;
8392 spin_lock_irqsave(&h->offline_device_lock, flags);
8393 list_for_each_safe(this, tmp, &h->offline_device_list) {
8394 d = list_entry(this, struct offline_device_entry,
8396 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8397 if (!hpsa_volume_offline(h, d->scsi3addr)) {
8398 spin_lock_irqsave(&h->offline_device_lock, flags);
8399 list_del(&d->offline_list);
8400 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8403 spin_lock_irqsave(&h->offline_device_lock, flags);
8405 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8409 static int hpsa_luns_changed(struct ctlr_info *h)
8411 int rc = 1; /* assume there are changes */
8412 struct ReportLUNdata *logdev = NULL;
8414 /* if we can't find out if lun data has changed,
8415 * assume that it has.
8418 if (!h->lastlogicals)
8421 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8425 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8426 dev_warn(&h->pdev->dev,
8427 "report luns failed, can't track lun changes.\n");
8430 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8431 dev_info(&h->pdev->dev,
8432 "Lun changes detected.\n");
8433 memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8436 rc = 0; /* no changes detected. */
8442 static void hpsa_perform_rescan(struct ctlr_info *h)
8444 struct Scsi_Host *sh = NULL;
8445 unsigned long flags;
8448 * Do the scan after the reset
8450 spin_lock_irqsave(&h->reset_lock, flags);
8451 if (h->reset_in_progress) {
8452 h->drv_req_rescan = 1;
8453 spin_unlock_irqrestore(&h->reset_lock, flags);
8456 spin_unlock_irqrestore(&h->reset_lock, flags);
8458 sh = scsi_host_get(h->scsi_host);
8460 hpsa_scan_start(sh);
8462 h->drv_req_rescan = 0;
8467 * watch for controller events
8469 static void hpsa_event_monitor_worker(struct work_struct *work)
8471 struct ctlr_info *h = container_of(to_delayed_work(work),
8472 struct ctlr_info, event_monitor_work);
8473 unsigned long flags;
8475 spin_lock_irqsave(&h->lock, flags);
8476 if (h->remove_in_progress) {
8477 spin_unlock_irqrestore(&h->lock, flags);
8480 spin_unlock_irqrestore(&h->lock, flags);
8482 if (hpsa_ctlr_needs_rescan(h)) {
8483 hpsa_ack_ctlr_events(h);
8484 hpsa_perform_rescan(h);
8487 spin_lock_irqsave(&h->lock, flags);
8488 if (!h->remove_in_progress)
8489 queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
8490 HPSA_EVENT_MONITOR_INTERVAL);
8491 spin_unlock_irqrestore(&h->lock, flags);
8494 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8496 unsigned long flags;
8497 struct ctlr_info *h = container_of(to_delayed_work(work),
8498 struct ctlr_info, rescan_ctlr_work);
8500 spin_lock_irqsave(&h->lock, flags);
8501 if (h->remove_in_progress) {
8502 spin_unlock_irqrestore(&h->lock, flags);
8505 spin_unlock_irqrestore(&h->lock, flags);
8507 if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
8508 hpsa_perform_rescan(h);
8509 } else if (h->discovery_polling) {
8510 if (hpsa_luns_changed(h)) {
8511 dev_info(&h->pdev->dev,
8512 "driver discovery polling rescan.\n");
8513 hpsa_perform_rescan(h);
8516 spin_lock_irqsave(&h->lock, flags);
8517 if (!h->remove_in_progress)
8518 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8519 h->heartbeat_sample_interval);
8520 spin_unlock_irqrestore(&h->lock, flags);
8523 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8525 unsigned long flags;
8526 struct ctlr_info *h = container_of(to_delayed_work(work),
8527 struct ctlr_info, monitor_ctlr_work);
8529 detect_controller_lockup(h);
8530 if (lockup_detected(h))
8533 spin_lock_irqsave(&h->lock, flags);
8534 if (!h->remove_in_progress)
8535 queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
8536 h->heartbeat_sample_interval);
8537 spin_unlock_irqrestore(&h->lock, flags);
8540 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8543 struct workqueue_struct *wq = NULL;
8545 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8547 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8552 static void hpda_free_ctlr_info(struct ctlr_info *h)
8554 kfree(h->reply_map);
8558 static struct ctlr_info *hpda_alloc_ctlr_info(void)
8560 struct ctlr_info *h;
8562 h = kzalloc(sizeof(*h), GFP_KERNEL);
8566 h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
8567 if (!h->reply_map) {
8574 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8577 struct ctlr_info *h;
8578 int try_soft_reset = 0;
8579 unsigned long flags;
8582 if (number_of_controllers == 0)
8583 printk(KERN_INFO DRIVER_NAME "\n");
8585 rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
8587 dev_warn(&pdev->dev, "Board ID not found\n");
8591 rc = hpsa_init_reset_devices(pdev, board_id);
8593 if (rc != -ENOTSUPP)
8595 /* If the reset fails in a particular way (it has no way to do
8596 * a proper hard reset, so returns -ENOTSUPP) we can try to do
8597 * a soft reset once we get the controller configured up to the
8598 * point that it can accept a command.
8604 reinit_after_soft_reset:
8606 /* Command structures must be aligned on a 32-byte boundary because
8607  * the 5 lower bits of the address are used by the hardware and by
8608  * the driver. See comments in hpsa.h for more info.
8610 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8611 h = hpda_alloc_ctlr_info();
8613 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8619 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8620 INIT_LIST_HEAD(&h->offline_device_list);
8621 spin_lock_init(&h->lock);
8622 spin_lock_init(&h->offline_device_lock);
8623 spin_lock_init(&h->scan_lock);
8624 spin_lock_init(&h->reset_lock);
8625 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8627 /* Allocate and clear per-cpu variable lockup_detected */
8628 h->lockup_detected = alloc_percpu(u32);
8629 if (!h->lockup_detected) {
8630 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8632 goto clean1; /* aer/h */
8634 set_lockup_detected_for_all_cpus(h, 0);
8636 rc = hpsa_pci_init(h);
8638 goto clean2; /* lu, aer/h */
8640 /* relies on h-> settings made by hpsa_pci_init, including
8641 * interrupt_mode h->intr */
8642 rc = hpsa_scsi_host_alloc(h);
8644 goto clean2_5; /* pci, lu, aer/h */
8646 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8647 h->ctlr = number_of_controllers;
8648 number_of_controllers++;
8650 /* configure PCI DMA stuff */
8651 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8655 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8659 dev_err(&pdev->dev, "no suitable DMA available\n");
8660 goto clean3; /* shost, pci, lu, aer/h */
8664 /* make sure the board interrupts are off */
8665 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8667 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8669 goto clean3; /* shost, pci, lu, aer/h */
8670 rc = hpsa_alloc_cmd_pool(h);
8672 goto clean4; /* irq, shost, pci, lu, aer/h */
8673 rc = hpsa_alloc_sg_chain_blocks(h);
8675 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
8676 init_waitqueue_head(&h->scan_wait_queue);
8677 init_waitqueue_head(&h->event_sync_wait_queue);
8678 mutex_init(&h->reset_mutex);
8679 h->scan_finished = 1; /* no scan currently in progress */
8680 h->scan_waiting = 0;
8682 pci_set_drvdata(pdev, h);
8685 spin_lock_init(&h->devlock);
8686 rc = hpsa_put_ctlr_into_performant_mode(h);
8687 if (rc)
8688 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
8690 /* create the resubmit workqueue */
8691 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8692 if (!h->rescan_ctlr_wq) {
8693 rc = -ENOMEM;
8694 goto clean7;
8695 }
8697 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8698 if (!h->resubmit_wq) {
8699 rc = -ENOMEM;
8700 goto clean7; /* aer/h */
8701 }
8703 h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
8704 if (!h->monitor_ctlr_wq) {
8705 rc = -ENOMEM;
8706 goto clean7;
8707 }
8710 * At this point, the controller is ready to take commands.
8711 * Now, if reset_devices and the hard reset didn't work, try
8712 * the soft reset and see if that works.
8714 if (try_soft_reset) {
8716 /* This is kind of gross. We may or may not get a completion
8717 * from the soft reset command, and if we do, then the value
8718 * from the fifo may or may not be valid. So, we wait 10 secs
8719 * after the reset throwing away any completions we get during
8720 * that time. Unregister the interrupt handler and register
8721 * fake ones to scoop up any residual completions.
8723 spin_lock_irqsave(&h->lock, flags);
8724 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8725 spin_unlock_irqrestore(&h->lock, flags);
8727 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8728 hpsa_intx_discard_completions);
8729 if (rc) {
8730 dev_warn(&h->pdev->dev,
8731 "Failed to request_irq after soft reset.\n");
8733 * cannot goto clean7 or free_irqs will be called
8734 * again. Instead, do its work
8736 hpsa_free_performant_mode(h); /* clean7 */
8737 hpsa_free_sg_chain_blocks(h); /* clean6 */
8738 hpsa_free_cmd_pool(h); /* clean5 */
8740 * skip hpsa_free_irqs(h) clean4 since that
8741 * was just called before request_irqs failed
8742 */
8743 goto clean3;
8744 }
8746 rc = hpsa_kdump_soft_reset(h);
8747 if (rc)
8748 /* Neither hard nor soft reset worked, we're hosed. */
8749 goto clean7;
8751 dev_info(&h->pdev->dev, "Board READY.\n");
8752 dev_info(&h->pdev->dev,
8753 "Waiting for stale completions to drain.\n");
8754 h->access.set_intr_mask(h, HPSA_INTR_ON);
8755 msleep(HPSA_POST_RESET_PAUSE_MSECS);
8756 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8758 rc = controller_reset_failed(h->cfgtable);
8759 if (rc)
8760 dev_info(&h->pdev->dev,
8761 "Soft reset appears to have failed.\n");
8763 /* since the controller's reset, we have to go back and re-init
8764 * everything. Easiest to just forget what we've done and do it
8765 * all over again.
8766 */
8767 hpsa_undo_allocations_after_kdump_soft_reset(h);
8768 try_soft_reset = 0;
8769 if (rc)
8770 /* don't goto clean, we already unallocated */
8771 return -ENODEV;
8773 goto reinit_after_soft_reset;
8776 /* Enable Accelerated IO path at driver layer */
8777 h->acciopath_status = 1;
8778 /* Disable discovery polling.*/
8779 h->discovery_polling = 0;
8782 /* Turn the interrupts on so we can service requests */
8783 h->access.set_intr_mask(h, HPSA_INTR_ON);
8785 hpsa_hba_inquiry(h);
8787 h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8788 if (!h->lastlogicals)
8789 dev_info(&h->pdev->dev,
8790 "Can't track change to report lun data\n");
8792 /* hook into SCSI subsystem */
8793 rc = hpsa_scsi_add_host(h);
8794 if (rc)
8795 goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
8797 /* Monitor the controller for firmware lockups */
8798 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8799 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8800 schedule_delayed_work(&h->monitor_ctlr_work,
8801 h->heartbeat_sample_interval);
8802 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8803 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8804 h->heartbeat_sample_interval);
8805 INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
8806 schedule_delayed_work(&h->event_monitor_work,
8807 HPSA_EVENT_MONITOR_INTERVAL);
8808 return 0;
8810 clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
8811 kfree(h->lastlogicals);
8812 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8813 hpsa_free_performant_mode(h);
8814 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8815 clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
8816 hpsa_free_sg_chain_blocks(h);
8817 clean5: /* cmd, irq, shost, pci, lu, aer/h */
8818 hpsa_free_cmd_pool(h);
8819 clean4: /* irq, shost, pci, lu, aer/h */
8820 hpsa_free_irqs(h);
8821 clean3: /* shost, pci, lu, aer/h */
8822 scsi_host_put(h->scsi_host);
8823 h->scsi_host = NULL;
8824 clean2_5: /* pci, lu, aer/h */
8825 hpsa_free_pci_init(h);
8826 clean2: /* lu, aer/h */
8827 if (h->lockup_detected) {
8828 free_percpu(h->lockup_detected);
8829 h->lockup_detected = NULL;
8831 clean1: /* wq/aer/h */
8832 if (h->resubmit_wq) {
8833 destroy_workqueue(h->resubmit_wq);
8834 h->resubmit_wq = NULL;
8836 if (h->rescan_ctlr_wq) {
8837 destroy_workqueue(h->rescan_ctlr_wq);
8838 h->rescan_ctlr_wq = NULL;
8840 if (h->monitor_ctlr_wq) {
8841 destroy_workqueue(h->monitor_ctlr_wq);
8842 h->monitor_ctlr_wq = NULL;
8844 hpsa_free_ctlr_info(h);
8845 return rc;
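/*
 * Reading note on the clean1..clean8 ladder above: each label unwinds
 * one allocation stage of hpsa_init_one() in reverse order, and each
 * label's trailing comment lists what is still held when control
 * reaches it (here "lu" reads as the lockup-detector allocation, "wq"
 * as the workqueues, and "aer/h" as PCI error reporting plus the
 * ctlr_info itself).
 */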
8848 static void hpsa_flush_cache(struct ctlr_info *h)
8850 char *flush_buf;
8851 struct CommandList *c;
8852 int rc;
8854 if (unlikely(lockup_detected(h)))
8855 return;
8856 flush_buf = kzalloc(4, GFP_KERNEL);
8857 if (!flush_buf)
8858 return;
8860 c = cmd_alloc(h);
8862 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8863 RAID_CTLR_LUNID, TYPE_CMD)) {
8864 goto out;
8865 }
8866 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8867 PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
8868 if (rc)
8869 goto out;
8870 if (c->err_info->CommandStatus != 0)
8871 out:
8872 dev_warn(&h->pdev->dev,
8873 "error flushing cache on controller\n");
8874 cmd_free(h, c);
8875 kfree(flush_buf);
8878 /* Make controller gather fresh report lun data each time we
8879 * send down a report luns request
8881 static void hpsa_disable_rld_caching(struct ctlr_info *h)
8883 u32 *options;
8884 struct CommandList *c;
8885 int rc;
8887 /* Don't bother trying to set diag options if locked up */
8888 if (unlikely(lockup_detected(h)))
8889 return;
8891 options = kzalloc(sizeof(*options), GFP_KERNEL);
8892 if (!options)
8893 return;
8895 c = cmd_alloc(h);
8897 /* first, get the current diag options settings */
8898 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8899 RAID_CTLR_LUNID, TYPE_CMD))
8900 goto errout;
8902 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8903 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
8904 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8905 goto errout;
8907 /* Now, set the bit for disabling the RLD caching */
8908 *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
8910 if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
8911 RAID_CTLR_LUNID, TYPE_CMD))
8912 goto errout;
8914 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8915 PCI_DMA_TODEVICE, NO_TIMEOUT);
8916 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8917 goto errout;
8919 /* Now verify that it got set: */
8920 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8921 RAID_CTLR_LUNID, TYPE_CMD))
8922 goto errout;
8924 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8925 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
8926 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8927 goto errout;
8929 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
8930 goto out;
8932 errout:
8933 dev_err(&h->pdev->dev,
8934 "Error: failed to disable report lun data caching.\n");
8935 out:
8936 cmd_free(h, c);
8937 kfree(options);
8940 static void __hpsa_shutdown(struct pci_dev *pdev)
8942 struct ctlr_info *h;
8944 h = pci_get_drvdata(pdev);
8945 /* Turn board interrupts off and send the flush-cache command so
8946 * that all data in the battery-backed write cache is committed to
8947 * disk before the controller loses power.
8948 */
8949 hpsa_flush_cache(h);
8950 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8951 hpsa_free_irqs(h); /* init_one 4 */
8952 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
8955 static void hpsa_shutdown(struct pci_dev *pdev)
8957 __hpsa_shutdown(pdev);
8958 pci_disable_device(pdev);
8961 static void hpsa_free_device_info(struct ctlr_info *h)
8963 int i;
8965 for (i = 0; i < h->ndevices; i++) {
8966 kfree(h->dev[i]);
8967 h->dev[i] = NULL;
8968 }
8971 static void hpsa_remove_one(struct pci_dev *pdev)
8973 struct ctlr_info *h;
8974 unsigned long flags;
8976 if (pci_get_drvdata(pdev) == NULL) {
8977 dev_err(&pdev->dev, "unable to remove device\n");
8978 return;
8980 h = pci_get_drvdata(pdev);
8982 /* Get rid of any controller monitoring work items */
8983 spin_lock_irqsave(&h->lock, flags);
8984 h->remove_in_progress = 1;
8985 spin_unlock_irqrestore(&h->lock, flags);
8986 cancel_delayed_work_sync(&h->monitor_ctlr_work);
8987 cancel_delayed_work_sync(&h->rescan_ctlr_work);
8988 cancel_delayed_work_sync(&h->event_monitor_work);
8989 destroy_workqueue(h->rescan_ctlr_wq);
8990 destroy_workqueue(h->resubmit_wq);
8991 destroy_workqueue(h->monitor_ctlr_wq);
8993 hpsa_delete_sas_host(h);
8996 * Call before disabling interrupts.
8997 * scsi_remove_host can trigger I/O operations especially
8998 * when multipath is enabled. There can be SYNCHRONIZE CACHE
8999 * operations which cannot complete and will hang the system.
9002 scsi_remove_host(h->scsi_host); /* init_one 8 */
9003 /* includes hpsa_free_irqs - init_one 4 */
9004 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
9005 __hpsa_shutdown(pdev);
9007 hpsa_free_device_info(h); /* scan */
9009 kfree(h->hba_inquiry_data); /* init_one 10 */
9010 h->hba_inquiry_data = NULL; /* init_one 10 */
9011 hpsa_free_ioaccel2_sg_chain_blocks(h);
9012 hpsa_free_performant_mode(h); /* init_one 7 */
9013 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
9014 hpsa_free_cmd_pool(h); /* init_one 5 */
9015 kfree(h->lastlogicals);
9017 /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
9019 scsi_host_put(h->scsi_host); /* init_one 3 */
9020 h->scsi_host = NULL; /* init_one 3 */
9022 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
9023 hpsa_free_pci_init(h); /* init_one 2.5 */
9025 free_percpu(h->lockup_detected); /* init_one 2 */
9026 h->lockup_detected = NULL; /* init_one 2 */
9027 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
9029 hpsa_free_ctlr_info(h); /* init_one 1 */
9032 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
9033 __attribute__((unused)) pm_message_t state)
9035 return 0;
9038 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
9040 return 0;
9043 static struct pci_driver hpsa_pci_driver = {
9044 .name = HPSA,
9045 .probe = hpsa_init_one,
9046 .remove = hpsa_remove_one,
9047 .id_table = hpsa_pci_device_id,
9048 .shutdown = hpsa_shutdown,
9049 .suspend = hpsa_suspend,
9050 .resume = hpsa_resume,
9053 /* Fill in bucket_map[], given nsgs (the max number of
9054 * scatter gather elements supported) and bucket[],
9055 * which is an array of 8 integers. The bucket[] array
9056 * contains 8 different DMA transfer sizes (in 16
9057 * byte increments) which the controller uses to fetch
9058 * commands. This function fills in bucket_map[], which
9059 * maps a given number of scatter gather elements to one of
9060 * the 8 DMA transfer sizes. The point of it is to allow the
9061 * controller to only do as much DMA as needed to fetch the
9062 * command, with the DMA transfer size encoded in the lower
9063 * bits of the command address.
9065 static void calc_bucket_map(int bucket[], int num_buckets,
9066 int nsgs, int min_blocks, u32 *bucket_map)
9068 int i, j, b, size;
9070 /* Note, bucket_map must have nsgs+1 entries. */
9071 for (i = 0; i <= nsgs; i++) {
9072 /* Compute size of a command with i SG entries */
9073 size = i + min_blocks;
9074 b = num_buckets; /* Assume the biggest bucket */
9075 /* Find the bucket that is just big enough */
9076 for (j = 0; j < num_buckets; j++) {
9077 if (bucket[j] >= size) {
9078 b = j;
9079 break;
9080 }
9081 }
9082 /* for a command with i SG entries, use bucket b. */
9083 bucket_map[i] = b;
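/*
 * Worked example (illustrative numbers): with bucket[] = {5, 6, 8, 10,
 * 12, 20, 28, SG_ENTRIES_IN_CMD + 4} and min_blocks = 4, a command
 * with i = 3 SG entries needs 3 + 4 = 7 sixteen-byte blocks; the
 * smallest bucket >= 7 is bucket[2] = 8, so bucket_map[3] = 2 and the
 * controller fetches only 8 * 16 = 128 bytes for that command.
 */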
9088 * Return -ENODEV on error, 0 on success (or no action).
9089 * Allocates numerous items that must be freed later.
9091 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
9093 int i;
9094 unsigned long register_value;
9095 unsigned long transMethod = CFGTBL_Trans_Performant |
9096 (trans_support & CFGTBL_Trans_use_short_tags) |
9097 CFGTBL_Trans_enable_directed_msix |
9098 (trans_support & (CFGTBL_Trans_io_accel1 |
9099 CFGTBL_Trans_io_accel2));
9100 struct access_method access = SA5_performant_access;
9102 /* This is a bit complicated. There are 8 registers on
9103 * the controller which we write to tell it the 8 different
9104 * command sizes that may occur. It's a way of
9105 * reducing the DMA done to fetch each command. Encoded into
9106 * each command's tag are 3 bits which communicate to the controller
9107 * which of the eight sizes that command fits within. The size of
9108 * each command depends on how many scatter gather entries there are.
9109 * Each SG entry requires 16 bytes. The eight registers are programmed
9110 * with the number of 16-byte blocks a command of that size requires.
9111 * The smallest command possible requires 5 such 16-byte blocks.
9112 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
9113 * blocks. Note, this only extends to the SG entries contained
9114 * within the command block, and does not extend to chained blocks
9115 * of SG elements. bft[] contains the eight values we write to
9116 * the registers. They are not evenly distributed, but have more
9117 * sizes for small commands, and fewer sizes for larger commands.
9119 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
9120 #define MIN_IOACCEL2_BFT_ENTRY 5
9121 #define HPSA_IOACCEL2_HEADER_SZ 4
9122 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
9123 13, 14, 15, 16, 17, 18, 19,
9124 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
9125 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
9126 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
9127 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
9128 16 * MIN_IOACCEL2_BFT_ENTRY);
9129 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
9130 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
9131 /* 5 = 1 s/g entry or 4k
9132 * 6 = 2 s/g entry or 8k
9133 * 8 = 4 s/g entry or 16k
9134 * 10 = 6 s/g entry or 24k
9137 /* If the controller supports either ioaccel method then
9138 * we can also use the RAID stack submit path that does not
9139 * perform the superfluous readl() after each command submission.
9141 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
9142 access = SA5_performant_access_no_read;
9144 /* Controller spec: zero out this buffer. */
9145 for (i = 0; i < h->nreply_queues; i++)
9146 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
9148 bft[7] = SG_ENTRIES_IN_CMD + 4;
9149 calc_bucket_map(bft, ARRAY_SIZE(bft),
9150 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
9151 for (i = 0; i < 8; i++)
9152 writel(bft[i], &h->transtable->BlockFetch[i]);
9154 /* size of controller ring buffer */
9155 writel(h->max_commands, &h->transtable->RepQSize);
9156 writel(h->nreply_queues, &h->transtable->RepQCount);
9157 writel(0, &h->transtable->RepQCtrAddrLow32);
9158 writel(0, &h->transtable->RepQCtrAddrHigh32);
9160 for (i = 0; i < h->nreply_queues; i++) {
9161 writel(0, &h->transtable->RepQAddr[i].upper);
9162 writel(h->reply_queue[i].busaddr,
9163 &h->transtable->RepQAddr[i].lower);
9166 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
9167 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
9168 /*
9169 * Enable outbound interrupt coalescing in accelerator mode.
9170 */
9171 if (trans_support & CFGTBL_Trans_io_accel1) {
9172 access = SA5_ioaccel_mode1_access;
9173 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9174 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9176 if (trans_support & CFGTBL_Trans_io_accel2)
9177 access = SA5_ioaccel_mode2_access;
9178 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9179 if (hpsa_wait_for_mode_change_ack(h)) {
9180 dev_err(&h->pdev->dev,
9181 "performant mode problem - doorbell timeout\n");
9182 return -ENODEV;
9184 register_value = readl(&(h->cfgtable->TransportActive));
9185 if (!(register_value & CFGTBL_Trans_Performant)) {
9186 dev_err(&h->pdev->dev,
9187 "performant mode problem - transport not active\n");
9188 return -ENODEV;
9190 /* Change the access methods to the performant access methods */
9191 h->access = access;
9192 h->transMethod = transMethod;
9194 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
9195 (trans_support & CFGTBL_Trans_io_accel2)))
9196 return 0;
9198 if (trans_support & CFGTBL_Trans_io_accel1) {
9199 /* Set up I/O accelerator mode */
9200 for (i = 0; i < h->nreply_queues; i++) {
9201 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
9202 h->reply_queue[i].current_entry =
9203 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
9205 bft[7] = h->ioaccel_maxsg + 8;
9206 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
9207 h->ioaccel1_blockFetchTable);
9209 /* initialize all reply queue entries to unused */
9210 for (i = 0; i < h->nreply_queues; i++)
9211 memset(h->reply_queue[i].head,
9212 (u8) IOACCEL_MODE1_REPLY_UNUSED,
9213 h->reply_queue_size);
9215 /* set all the constant fields in the accelerator command
9216 * frames once at init time to save CPU cycles later.
9218 for (i = 0; i < h->nr_cmds; i++) {
9219 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
9221 cp->function = IOACCEL1_FUNCTION_SCSIIO;
9222 cp->err_info = (u32) (h->errinfo_pool_dhandle +
9223 (i * sizeof(struct ErrorInfo)));
9224 cp->err_info_len = sizeof(struct ErrorInfo);
9225 cp->sgl_offset = IOACCEL1_SGLOFFSET;
9226 cp->host_context_flags =
9227 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
9228 cp->timeout_sec = 0;
9229 cp->ReplyQueue = 0;
9230 cp->tag =
9231 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
9232 cp->host_addr =
9233 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
9234 (i * sizeof(struct io_accel1_cmd)));
9236 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9237 u64 cfg_offset, cfg_base_addr_index;
9238 u32 bft2_offset, cfg_base_addr;
9239 int rc;
9241 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
9242 &cfg_base_addr_index, &cfg_offset);
9243 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
9244 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
9245 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
9246 4, h->ioaccel2_blockFetchTable);
9247 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
9248 BUILD_BUG_ON(offsetof(struct CfgTable,
9249 io_accel_request_size_offset) != 0xb8);
9250 h->ioaccel2_bft2_regs =
9251 remap_pci_mem(pci_resource_start(h->pdev,
9252 cfg_base_addr_index) +
9253 cfg_offset + bft2_offset,
9254 ARRAY_SIZE(bft2) *
9255 sizeof(*h->ioaccel2_bft2_regs));
9256 for (i = 0; i < ARRAY_SIZE(bft2); i++)
9257 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
9259 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9260 if (hpsa_wait_for_mode_change_ack(h)) {
9261 dev_err(&h->pdev->dev,
9262 "performant mode problem - enabling ioaccel mode\n");
9263 return -ENODEV;
9265 return 0;
9268 /* Free ioaccel1 mode command blocks and block fetch table */
9269 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9271 if (h->ioaccel_cmd_pool) {
9272 pci_free_consistent(h->pdev,
9273 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9274 h->ioaccel_cmd_pool,
9275 h->ioaccel_cmd_pool_dhandle);
9276 h->ioaccel_cmd_pool = NULL;
9277 h->ioaccel_cmd_pool_dhandle = 0;
9279 kfree(h->ioaccel1_blockFetchTable);
9280 h->ioaccel1_blockFetchTable = NULL;
9283 /* Allocate ioaccel1 mode command blocks and block fetch table */
9284 static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9286 h->ioaccel_maxsg =
9287 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9288 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
9289 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
9291 /* Command structures must be aligned on a 128-byte boundary
9292 * because the 7 lower bits of the address are used by the
9293 * hardware.
9294 */
9295 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
9296 IOACCEL1_COMMANDLIST_ALIGNMENT);
9297 h->ioaccel_cmd_pool =
9298 pci_alloc_consistent(h->pdev,
9299 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9300 &(h->ioaccel_cmd_pool_dhandle));
9302 h->ioaccel1_blockFetchTable =
9303 kmalloc(((h->ioaccel_maxsg + 1) *
9304 sizeof(u32)), GFP_KERNEL);
9306 if ((h->ioaccel_cmd_pool == NULL) ||
9307 (h->ioaccel1_blockFetchTable == NULL))
9308 goto clean_up;
9310 memset(h->ioaccel_cmd_pool, 0,
9311 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
9312 return 0;
9314 clean_up:
9315 hpsa_free_ioaccel1_cmd_and_bft(h);
9316 return -ENOMEM;
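/*
 * Illustrative note (an assumption, not what this listing uses):
 * pci_alloc_consistent()/pci_free_consistent() are legacy wrappers;
 * the equivalent modern calls would be
 *
 *	h->ioaccel_cmd_pool = dma_alloc_coherent(&h->pdev->dev,
 *		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
 *		&h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
 *
 * paired with dma_free_coherent() on the same device and handle.
 */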
9319 /* Free ioaccel2 mode command blocks and block fetch table */
9320 static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9322 hpsa_free_ioaccel2_sg_chain_blocks(h);
9324 if (h->ioaccel2_cmd_pool) {
9325 pci_free_consistent(h->pdev,
9326 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9327 h->ioaccel2_cmd_pool,
9328 h->ioaccel2_cmd_pool_dhandle);
9329 h->ioaccel2_cmd_pool = NULL;
9330 h->ioaccel2_cmd_pool_dhandle = 0;
9332 kfree(h->ioaccel2_blockFetchTable);
9333 h->ioaccel2_blockFetchTable = NULL;
9336 /* Allocate ioaccel2 mode command blocks and block fetch table */
9337 static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9339 int rc;
9343 h->ioaccel_maxsg =
9344 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9345 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
9346 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
9348 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
9349 IOACCEL2_COMMANDLIST_ALIGNMENT);
9350 h->ioaccel2_cmd_pool =
9351 pci_alloc_consistent(h->pdev,
9352 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9353 &(h->ioaccel2_cmd_pool_dhandle));
9355 h->ioaccel2_blockFetchTable =
9356 kmalloc(((h->ioaccel_maxsg + 1) *
9357 sizeof(u32)), GFP_KERNEL);
9359 if ((h->ioaccel2_cmd_pool == NULL) ||
9360 (h->ioaccel2_blockFetchTable == NULL)) {
9361 rc = -ENOMEM;
9362 goto clean_up;
9363 }
9365 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
9366 if (rc)
9367 goto clean_up;
9369 memset(h->ioaccel2_cmd_pool, 0,
9370 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
9371 return 0;
9373 clean_up:
9374 hpsa_free_ioaccel2_cmd_and_bft(h);
9375 return rc;
9378 /* Free items allocated by hpsa_put_ctlr_into_performant_mode */
9379 static void hpsa_free_performant_mode(struct ctlr_info *h)
9381 kfree(h->blockFetchTable);
9382 h->blockFetchTable = NULL;
9383 hpsa_free_reply_queues(h);
9384 hpsa_free_ioaccel1_cmd_and_bft(h);
9385 hpsa_free_ioaccel2_cmd_and_bft(h);
9388 /* return -ENODEV on error, 0 on success (or no action)
9389 * allocates numerous items that must be freed later
9391 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9393 u32 trans_support;
9394 unsigned long transMethod = CFGTBL_Trans_Performant |
9395 CFGTBL_Trans_use_short_tags;
9396 int i, rc;
9398 if (hpsa_simple_mode)
9399 return 0;
9401 trans_support = readl(&(h->cfgtable->TransportSupport));
9402 if (!(trans_support & PERFORMANT_MODE))
9403 return 0;
9405 /* Check for I/O accelerator mode support */
9406 if (trans_support & CFGTBL_Trans_io_accel1) {
9407 transMethod |= CFGTBL_Trans_io_accel1 |
9408 CFGTBL_Trans_enable_directed_msix;
9409 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
9410 if (rc)
9411 return rc;
9412 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9413 transMethod |= CFGTBL_Trans_io_accel2 |
9414 CFGTBL_Trans_enable_directed_msix;
9415 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
9416 if (rc)
9417 return rc;
9418 }
9420 h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
9421 hpsa_get_max_perf_mode_cmds(h);
9422 /* Performant mode ring buffer and supporting data structures */
9423 h->reply_queue_size = h->max_commands * sizeof(u64);
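/*
 * Worked example (illustrative numbers): each reply-queue entry is an
 * 8-byte tag, so if max_commands were 1024 every reply queue would
 * occupy 1024 * sizeof(u64) = 8 KB of coherent DMA memory, one such
 * ring per interrupt vector.
 */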
9425 for (i = 0; i < h->nreply_queues; i++) {
9426 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
9427 h->reply_queue_size,
9428 &(h->reply_queue[i].busaddr));
9429 if (!h->reply_queue[i].head) {
9430 rc = -ENOMEM;
9431 goto clean1; /* rq, ioaccel */
9432 }
9433 h->reply_queue[i].size = h->max_commands;
9434 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
9435 h->reply_queue[i].current_entry = 0;
9438 /* Need a block fetch table for performant mode */
9439 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
9440 sizeof(u32)), GFP_KERNEL);
9441 if (!h->blockFetchTable) {
9442 rc = -ENOMEM;
9443 goto clean1; /* rq, ioaccel */
9444 }
9446 rc = hpsa_enter_performant_mode(h, trans_support);
9447 if (rc)
9448 goto clean2; /* bft, rq, ioaccel */
9449 return 0;
9451 clean2: /* bft, rq, ioaccel */
9452 kfree(h->blockFetchTable);
9453 h->blockFetchTable = NULL;
9454 clean1: /* rq, ioaccel */
9455 hpsa_free_reply_queues(h);
9456 hpsa_free_ioaccel1_cmd_and_bft(h);
9457 hpsa_free_ioaccel2_cmd_and_bft(h);
9458 return rc;
9461 static int is_accelerated_cmd(struct CommandList *c)
9463 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
9466 static void hpsa_drain_accel_commands(struct ctlr_info *h)
9468 struct CommandList *c = NULL;
9469 int i, accel_cmds_out;
9470 int refcount;
9472 do { /* wait for all outstanding ioaccel commands to drain out */
9473 accel_cmds_out = 0;
9474 for (i = 0; i < h->nr_cmds; i++) {
9475 c = h->cmd_pool + i;
9476 refcount = atomic_inc_return(&c->refcount);
9477 if (refcount > 1) /* Command is allocated */
9478 accel_cmds_out += is_accelerated_cmd(c);
9479 cmd_free(h, c);
9480 }
9481 if (accel_cmds_out <= 0)
9482 break;
9483 msleep(100);
9484 } while (1);
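/*
 * Reading note: atomic_inc_return() on c->refcount both tests and
 * temporarily pins each command slot; a result > 1 means the slot was
 * already allocated, and the cmd_free() that follows drops the extra
 * reference the probe took.
 */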
9487 static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
9488 struct hpsa_sas_port *hpsa_sas_port)
9490 struct hpsa_sas_phy *hpsa_sas_phy;
9491 struct sas_phy *phy;
9493 hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
9494 if (!hpsa_sas_phy)
9495 return NULL;
9497 phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
9498 hpsa_sas_port->next_phy_index);
9499 if (!phy) {
9500 kfree(hpsa_sas_phy);
9501 return NULL;
9502 }
9504 hpsa_sas_port->next_phy_index++;
9505 hpsa_sas_phy->phy = phy;
9506 hpsa_sas_phy->parent_port = hpsa_sas_port;
9508 return hpsa_sas_phy;
9511 static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9513 struct sas_phy *phy = hpsa_sas_phy->phy;
9515 sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9516 if (hpsa_sas_phy->added_to_port)
9517 list_del(&hpsa_sas_phy->phy_list_entry);
9518 sas_phy_delete(phy);
9519 kfree(hpsa_sas_phy);
9522 static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9524 int rc;
9525 struct hpsa_sas_port *hpsa_sas_port;
9526 struct sas_phy *phy;
9527 struct sas_identify *identify;
9529 hpsa_sas_port = hpsa_sas_phy->parent_port;
9530 phy = hpsa_sas_phy->phy;
9532 identify = &phy->identify;
9533 memset(identify, 0, sizeof(*identify));
9534 identify->sas_address = hpsa_sas_port->sas_address;
9535 identify->device_type = SAS_END_DEVICE;
9536 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9537 identify->target_port_protocols = SAS_PROTOCOL_STP;
9538 phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9539 phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9540 phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
9541 phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
9542 phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
9544 rc = sas_phy_add(hpsa_sas_phy->phy);
9545 if (rc)
9546 return rc;
9548 sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
9549 list_add_tail(&hpsa_sas_phy->phy_list_entry,
9550 &hpsa_sas_port->phy_list_head);
9551 hpsa_sas_phy->added_to_port = true;
9553 return 0;
9556 static int
9557 hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
9558 struct sas_rphy *rphy)
9560 struct sas_identify *identify;
9562 identify = &rphy->identify;
9563 identify->sas_address = hpsa_sas_port->sas_address;
9564 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9565 identify->target_port_protocols = SAS_PROTOCOL_STP;
9567 return sas_rphy_add(rphy);
9570 static struct hpsa_sas_port
9571 *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
9572 u64 sas_address)
9574 int rc;
9575 struct hpsa_sas_port *hpsa_sas_port;
9576 struct sas_port *port;
9578 hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
9579 if (!hpsa_sas_port)
9580 return NULL;
9582 INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
9583 hpsa_sas_port->parent_node = hpsa_sas_node;
9585 port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
9586 if (!port)
9587 goto free_hpsa_port;
9589 rc = sas_port_add(port);
9590 if (rc)
9591 goto free_sas_port;
9593 hpsa_sas_port->port = port;
9594 hpsa_sas_port->sas_address = sas_address;
9595 list_add_tail(&hpsa_sas_port->port_list_entry,
9596 &hpsa_sas_node->port_list_head);
9598 return hpsa_sas_port;
9600 free_sas_port:
9601 sas_port_free(port);
9602 free_hpsa_port:
9603 kfree(hpsa_sas_port);
9605 return NULL;
9608 static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
9610 struct hpsa_sas_phy *hpsa_sas_phy;
9611 struct hpsa_sas_phy *next;
9613 list_for_each_entry_safe(hpsa_sas_phy, next,
9614 &hpsa_sas_port->phy_list_head, phy_list_entry)
9615 hpsa_free_sas_phy(hpsa_sas_phy);
9617 sas_port_delete(hpsa_sas_port->port);
9618 list_del(&hpsa_sas_port->port_list_entry);
9619 kfree(hpsa_sas_port);
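/*
 * Reading note: the list_for_each_entry_safe() walk above is required
 * (rather than list_for_each_entry()) because hpsa_free_sas_phy()
 * frees the current entry; the iterator fetches "next" before each
 * entry is released.
 */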
9622 static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
9624 struct hpsa_sas_node *hpsa_sas_node;
9626 hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
9627 if (hpsa_sas_node) {
9628 hpsa_sas_node->parent_dev = parent_dev;
9629 INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
9632 return hpsa_sas_node;
9635 static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
9637 struct hpsa_sas_port *hpsa_sas_port;
9638 struct hpsa_sas_port *next;
9640 if (!hpsa_sas_node)
9641 return;
9643 list_for_each_entry_safe(hpsa_sas_port, next,
9644 &hpsa_sas_node->port_list_head, port_list_entry)
9645 hpsa_free_sas_port(hpsa_sas_port);
9647 kfree(hpsa_sas_node);
9650 static struct hpsa_scsi_dev_t
9651 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
9652 struct sas_rphy *rphy)
9654 int i;
9655 struct hpsa_scsi_dev_t *device;
9657 for (i = 0; i < h->ndevices; i++) {
9658 device = h->dev[i];
9659 if (!device->sas_port)
9660 continue;
9661 if (device->sas_port->rphy == rphy)
9662 return device;
9663 }
9665 return NULL;
9668 static int hpsa_add_sas_host(struct ctlr_info *h)
9670 int rc;
9671 struct device *parent_dev;
9672 struct hpsa_sas_node *hpsa_sas_node;
9673 struct hpsa_sas_port *hpsa_sas_port;
9674 struct hpsa_sas_phy *hpsa_sas_phy;
9676 parent_dev = &h->scsi_host->shost_dev;
9678 hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
9679 if (!hpsa_sas_node)
9680 return -ENOMEM;
9682 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
9683 if (!hpsa_sas_port) {
9684 rc = -ENODEV;
9685 goto free_sas_node;
9686 }
9688 hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
9689 if (!hpsa_sas_phy) {
9690 rc = -ENODEV;
9691 goto free_sas_port;
9692 }
9694 rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
9695 if (rc)
9696 goto free_sas_phy;
9698 h->sas_host = hpsa_sas_node;
9700 return 0;
9702 free_sas_phy:
9703 sas_phy_free(hpsa_sas_phy->phy);
9704 kfree(hpsa_sas_phy);
9705 free_sas_port:
9706 hpsa_free_sas_port(hpsa_sas_port);
9707 free_sas_node:
9708 hpsa_free_sas_node(hpsa_sas_node);
9710 return rc;
9713 static void hpsa_delete_sas_host(struct ctlr_info *h)
9715 hpsa_free_sas_node(h->sas_host);
9718 static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
9719 struct hpsa_scsi_dev_t *device)
9721 int rc;
9722 struct hpsa_sas_port *hpsa_sas_port;
9723 struct sas_rphy *rphy;
9725 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
9726 if (!hpsa_sas_port)
9727 return -ENOMEM;
9729 rphy = sas_end_device_alloc(hpsa_sas_port->port);
9730 if (!rphy) {
9731 rc = -ENODEV;
9732 goto free_sas_port;
9733 }
9735 hpsa_sas_port->rphy = rphy;
9736 device->sas_port = hpsa_sas_port;
9738 rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
9739 if (rc)
9740 goto free_sas_rphy;
9742 return 0;
9744 free_sas_rphy:
9745 sas_rphy_free(rphy);
9746 free_sas_port:
9747 hpsa_free_sas_port(hpsa_sas_port);
9748 device->sas_port = NULL;
9750 return rc;
9753 static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
9755 if (device->sas_port) {
9756 hpsa_free_sas_port(device->sas_port);
9757 device->sas_port = NULL;
9761 static int
9762 hpsa_sas_get_linkerrors(struct sas_phy *phy)
9764 return 0;
9767 static int
9768 hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
9770 struct Scsi_Host *shost = phy_to_shost(rphy);
9771 struct ctlr_info *h;
9772 struct hpsa_scsi_dev_t *sd;
9774 if (!shost)
9775 return -ENXIO;
9777 h = shost_to_hba(shost);
9779 if (!h)
9780 return -ENXIO;
9782 sd = hpsa_find_device_by_sas_rphy(h, rphy);
9783 if (!sd)
9784 return -ENXIO;
9786 *identifier = sd->eli;
9788 return 0;
9791 static int
9792 hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
9794 return -ENXIO;
9797 static int
9798 hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
9800 return 0;
9803 static int
9804 hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
9806 return 0;
9809 static int
9810 hpsa_sas_phy_setup(struct sas_phy *phy)
9812 return 0;
9815 static void
9816 hpsa_sas_phy_release(struct sas_phy *phy)
9820 static int
9821 hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
9823 return -EINVAL;
9826 static struct sas_function_template hpsa_sas_transport_functions = {
9827 .get_linkerrors = hpsa_sas_get_linkerrors,
9828 .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
9829 .get_bay_identifier = hpsa_sas_get_bay_identifier,
9830 .phy_reset = hpsa_sas_phy_reset,
9831 .phy_enable = hpsa_sas_phy_enable,
9832 .phy_setup = hpsa_sas_phy_setup,
9833 .phy_release = hpsa_sas_phy_release,
9834 .set_phy_speed = hpsa_sas_phy_speed,
9838 * This is it. Register the PCI driver information for the cards we control;
9839 * the OS will call our registered routines when it finds one of our cards.
9841 static int __init hpsa_init(void)
9843 int rc;
9845 hpsa_sas_transport_template =
9846 sas_attach_transport(&hpsa_sas_transport_functions);
9847 if (!hpsa_sas_transport_template)
9848 return -ENODEV;
9850 rc = pci_register_driver(&hpsa_pci_driver);
9852 if (rc)
9853 sas_release_transport(hpsa_sas_transport_template);
9855 return rc;
9858 static void __exit hpsa_cleanup(void)
9860 pci_unregister_driver(&hpsa_pci_driver);
9861 sas_release_transport(hpsa_sas_transport_template);
9864 static void __attribute__((unused)) verify_offsets(void)
9866 #define VERIFY_OFFSET(member, offset) \
9867 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
9869 VERIFY_OFFSET(structure_size, 0);
9870 VERIFY_OFFSET(volume_blk_size, 4);
9871 VERIFY_OFFSET(volume_blk_cnt, 8);
9872 VERIFY_OFFSET(phys_blk_shift, 16);
9873 VERIFY_OFFSET(parity_rotation_shift, 17);
9874 VERIFY_OFFSET(strip_size, 18);
9875 VERIFY_OFFSET(disk_starting_blk, 20);
9876 VERIFY_OFFSET(disk_blk_cnt, 28);
9877 VERIFY_OFFSET(data_disks_per_row, 36);
9878 VERIFY_OFFSET(metadata_disks_per_row, 38);
9879 VERIFY_OFFSET(row_cnt, 40);
9880 VERIFY_OFFSET(layout_map_count, 42);
9881 VERIFY_OFFSET(flags, 44);
9882 VERIFY_OFFSET(dekindex, 46);
9883 /* VERIFY_OFFSET(reserved, 48 */
9884 VERIFY_OFFSET(data, 64);
9886 #undef VERIFY_OFFSET
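/*
 * Reading note: BUILD_BUG_ON() evaluates its condition entirely at
 * compile time, so every VERIFY_OFFSET() check above and below costs
 * nothing at run time; a layout mismatch between the driver structs
 * and the controller's expected offsets simply fails the build.
 */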
9888 #define VERIFY_OFFSET(member, offset) \
9889 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
9891 VERIFY_OFFSET(IU_type, 0);
9892 VERIFY_OFFSET(direction, 1);
9893 VERIFY_OFFSET(reply_queue, 2);
9894 /* VERIFY_OFFSET(reserved1, 3); */
9895 VERIFY_OFFSET(scsi_nexus, 4);
9896 VERIFY_OFFSET(Tag, 8);
9897 VERIFY_OFFSET(cdb, 16);
9898 VERIFY_OFFSET(cciss_lun, 32);
9899 VERIFY_OFFSET(data_len, 40);
9900 VERIFY_OFFSET(cmd_priority_task_attr, 44);
9901 VERIFY_OFFSET(sg_count, 45);
9902 /* VERIFY_OFFSET(reserved3 */
9903 VERIFY_OFFSET(err_ptr, 48);
9904 VERIFY_OFFSET(err_len, 56);
9905 /* VERIFY_OFFSET(reserved4 */
9906 VERIFY_OFFSET(sg, 64);
9908 #undef VERIFY_OFFSET
9910 #define VERIFY_OFFSET(member, offset) \
9911 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
9913 VERIFY_OFFSET(dev_handle, 0x00);
9914 VERIFY_OFFSET(reserved1, 0x02);
9915 VERIFY_OFFSET(function, 0x03);
9916 VERIFY_OFFSET(reserved2, 0x04);
9917 VERIFY_OFFSET(err_info, 0x0C);
9918 VERIFY_OFFSET(reserved3, 0x10);
9919 VERIFY_OFFSET(err_info_len, 0x12);
9920 VERIFY_OFFSET(reserved4, 0x13);
9921 VERIFY_OFFSET(sgl_offset, 0x14);
9922 VERIFY_OFFSET(reserved5, 0x15);
9923 VERIFY_OFFSET(transfer_len, 0x1C);
9924 VERIFY_OFFSET(reserved6, 0x20);
9925 VERIFY_OFFSET(io_flags, 0x24);
9926 VERIFY_OFFSET(reserved7, 0x26);
9927 VERIFY_OFFSET(LUN, 0x34);
9928 VERIFY_OFFSET(control, 0x3C);
9929 VERIFY_OFFSET(CDB, 0x40);
9930 VERIFY_OFFSET(reserved8, 0x50);
9931 VERIFY_OFFSET(host_context_flags, 0x60);
9932 VERIFY_OFFSET(timeout_sec, 0x62);
9933 VERIFY_OFFSET(ReplyQueue, 0x64);
9934 VERIFY_OFFSET(reserved9, 0x65);
9935 VERIFY_OFFSET(tag, 0x68);
9936 VERIFY_OFFSET(host_addr, 0x70);
9937 VERIFY_OFFSET(CISS_LUN, 0x78);
9938 VERIFY_OFFSET(SG, 0x78 + 8);
9939 #undef VERIFY_OFFSET
9942 module_init(hpsa_init);
9943 module_exit(hpsa_cleanup);