1 // SPDX-License-Identifier: GPL-2.0
3 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
5 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
7 * Based on the original DAC960 driver,
8 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
9 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/pci.h>
18 #include <linux/raid_class.h>
19 #include <asm/unaligned.h>
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_host.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_tcq.h>
27 static struct raid_template *myrb_raid_template;
29 static void myrb_monitor(struct work_struct *work);
30 static inline void myrb_translate_devstate(void *DeviceState);
32 static inline int myrb_logical_channel(struct Scsi_Host *shost)
34 return shost->max_channel - 1;
37 static struct myrb_devstate_name_entry {
38 enum myrb_devstate state;
40 } myrb_devstate_name_list[] = {
41 { MYRB_DEVICE_DEAD, "Dead" },
42 { MYRB_DEVICE_WO, "WriteOnly" },
43 { MYRB_DEVICE_ONLINE, "Online" },
44 { MYRB_DEVICE_CRITICAL, "Critical" },
45 { MYRB_DEVICE_STANDBY, "Standby" },
46 { MYRB_DEVICE_OFFLINE, "Offline" },
49 static const char *myrb_devstate_name(enum myrb_devstate state)
51 struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
54 for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
55 if (entry[i].state == state)
61 static struct myrb_raidlevel_name_entry {
62 enum myrb_raidlevel level;
64 } myrb_raidlevel_name_list[] = {
65 { MYRB_RAID_LEVEL0, "RAID0" },
66 { MYRB_RAID_LEVEL1, "RAID1" },
67 { MYRB_RAID_LEVEL3, "RAID3" },
68 { MYRB_RAID_LEVEL5, "RAID5" },
69 { MYRB_RAID_LEVEL6, "RAID6" },
70 { MYRB_RAID_JBOD, "JBOD" },
73 static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
75 struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
78 for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
79 if (entry[i].level == level)
86 * myrb_create_mempools - allocates auxiliary data structures
88 * Return: true on success, false otherwise.
90 static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
92 size_t elem_size, elem_align;
94 elem_align = sizeof(struct myrb_sge);
95 elem_size = cb->host->sg_tablesize * elem_align;
96 cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
97 elem_size, elem_align, 0);
98 if (cb->sg_pool == NULL) {
99 shost_printk(KERN_ERR, cb->host,
100 "Failed to allocate SG pool\n");
104 cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
105 sizeof(struct myrb_dcdb),
106 sizeof(unsigned int), 0);
107 if (!cb->dcdb_pool) {
108 dma_pool_destroy(cb->sg_pool);
110 shost_printk(KERN_ERR, cb->host,
111 "Failed to allocate DCDB pool\n");
115 snprintf(cb->work_q_name, sizeof(cb->work_q_name),
116 "myrb_wq_%d", cb->host->host_no);
117 cb->work_q = create_singlethread_workqueue(cb->work_q_name);
119 dma_pool_destroy(cb->dcdb_pool);
120 cb->dcdb_pool = NULL;
121 dma_pool_destroy(cb->sg_pool);
123 shost_printk(KERN_ERR, cb->host,
124 "Failed to create workqueue\n");
129 * Initialize the Monitoring Timer.
131 INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
132 queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
138 * myrb_destroy_mempools - tears down the memory pools for the controller
140 static void myrb_destroy_mempools(struct myrb_hba *cb)
142 cancel_delayed_work_sync(&cb->monitor_work);
143 destroy_workqueue(cb->work_q);
145 dma_pool_destroy(cb->sg_pool);
146 dma_pool_destroy(cb->dcdb_pool);
150 * myrb_reset_cmd - reset command block
152 static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
154 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
156 memset(mbox, 0, sizeof(union myrb_cmd_mbox));
161 * myrb_qcmd - queues command block for execution
163 static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
165 void __iomem *base = cb->io_base;
166 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
167 union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
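/*
 * Copy the command into the next slot of the mailbox ring; if either of
 * the two previously used slots has already been consumed by the
 * controller, notify it that a new command is waiting.
 */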
169 cb->write_cmd_mbox(next_mbox, mbox);
170 if (cb->prev_cmd_mbox1->words[0] == 0 ||
171 cb->prev_cmd_mbox2->words[0] == 0)
172 cb->get_cmd_mbox(base);
173 cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
174 cb->prev_cmd_mbox1 = next_mbox;
175 if (++next_mbox > cb->last_cmd_mbox)
176 next_mbox = cb->first_cmd_mbox;
177 cb->next_cmd_mbox = next_mbox;
181 * myrb_exec_cmd - executes command block and waits for completion.
183 * Return: command status
185 static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
186 struct myrb_cmdblk *cmd_blk)
188 DECLARE_COMPLETION_ONSTACK(cmpl);
191 cmd_blk->completion = &cmpl;
193 spin_lock_irqsave(&cb->queue_lock, flags);
194 cb->qcmd(cb, cmd_blk);
195 spin_unlock_irqrestore(&cb->queue_lock, flags);
197 wait_for_completion(&cmpl);
198 return cmd_blk->status;
202 * myrb_exec_type3 - executes a type 3 command and waits for completion.
204 * Return: command status
206 static unsigned short myrb_exec_type3(struct myrb_hba *cb,
207 enum myrb_cmd_opcode op, dma_addr_t addr)
209 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
210 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
211 unsigned short status;
213 mutex_lock(&cb->dcmd_mutex);
214 myrb_reset_cmd(cmd_blk);
215 mbox->type3.id = MYRB_DCMD_TAG;
216 mbox->type3.opcode = op;
217 mbox->type3.addr = addr;
218 status = myrb_exec_cmd(cb, cmd_blk);
219 mutex_unlock(&cb->dcmd_mutex);
224 * myrb_exec_type3D - executes a type 3D command and waits for completion.
226 * Return: command status
228 static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
229 enum myrb_cmd_opcode op, struct scsi_device *sdev,
230 struct myrb_pdev_state *pdev_info)
232 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
233 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
234 unsigned short status;
235 dma_addr_t pdev_info_addr;
237 pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
238 sizeof(struct myrb_pdev_state),
240 if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
241 return MYRB_STATUS_SUBSYS_FAILED;
243 mutex_lock(&cb->dcmd_mutex);
244 myrb_reset_cmd(cmd_blk);
245 mbox->type3D.id = MYRB_DCMD_TAG;
246 mbox->type3D.opcode = op;
247 mbox->type3D.channel = sdev->channel;
248 mbox->type3D.target = sdev->id;
249 mbox->type3D.addr = pdev_info_addr;
250 status = myrb_exec_cmd(cb, cmd_blk);
251 mutex_unlock(&cb->dcmd_mutex);
252 dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
253 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
254 if (status == MYRB_STATUS_SUCCESS &&
255 mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
256 myrb_translate_devstate(pdev_info);
261 static char *myrb_event_msg[] = {
262 "killed because write recovery failed",
263 "killed because of SCSI bus reset failure",
264 "killed because of double check condition",
265 "killed because it was removed",
266 "killed because of gross error on SCSI chip",
267 "killed because of bad tag returned from drive",
268 "killed because of timeout on SCSI command",
269 "killed because of reset SCSI command issued from system",
270 "killed because busy or parity error count exceeded limit",
271 "killed because of 'kill drive' command from system",
272 "killed because of selection timeout",
273 "killed due to SCSI phase sequence error",
274 "killed due to unknown status",
278 * myrb_get_event - get event log from HBA
279 * @cb: pointer to the hba structure
280 * @event: number of the event
282 * Executes a type 3E command and logs the event message.
284 static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
286 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
287 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
288 struct myrb_log_entry *ev_buf;
290 unsigned short status;
292 ev_buf = dma_alloc_coherent(&cb->pdev->dev,
293 sizeof(struct myrb_log_entry),
294 &ev_addr, GFP_KERNEL);
298 myrb_reset_cmd(cmd_blk);
299 mbox->type3E.id = MYRB_MCMD_TAG;
300 mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
301 mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
302 mbox->type3E.opqual = 1;
303 mbox->type3E.ev_seq = event;
304 mbox->type3E.addr = ev_addr;
305 status = myrb_exec_cmd(cb, cmd_blk);
306 if (status != MYRB_STATUS_SUCCESS)
307 shost_printk(KERN_INFO, cb->host,
308 "Failed to get event log %d, status %04x\n",
311 else if (ev_buf->seq_num == event) {
312 struct scsi_sense_hdr sshdr;
314 memset(&sshdr, 0, sizeof(sshdr));
315 scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
317 if (sshdr.sense_key == VENDOR_SPECIFIC &&
319 sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
320 shost_printk(KERN_CRIT, cb->host,
321 "Physical drive %d:%d: %s\n",
322 ev_buf->channel, ev_buf->target,
323 myrb_event_msg[sshdr.ascq]);
325 shost_printk(KERN_CRIT, cb->host,
326 "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
327 ev_buf->channel, ev_buf->target,
328 sshdr.sense_key, sshdr.asc, sshdr.ascq);
331 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
336 * myrb_get_errtable - retrieves the error table from the controller
338 * Executes a type 3 command and logs the error table from the controller.
340 static void myrb_get_errtable(struct myrb_hba *cb)
342 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
343 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
344 unsigned short status;
345 struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
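/* Snapshot the current error table so that only changed counters are logged below */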
347 memcpy(&old_table, cb->err_table, sizeof(old_table));
349 myrb_reset_cmd(cmd_blk);
350 mbox->type3.id = MYRB_MCMD_TAG;
351 mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
352 mbox->type3.addr = cb->err_table_addr;
353 status = myrb_exec_cmd(cb, cmd_blk);
354 if (status == MYRB_STATUS_SUCCESS) {
355 struct myrb_error_entry *table = cb->err_table;
356 struct myrb_error_entry *new, *old;
357 size_t err_table_offset;
358 struct scsi_device *sdev;
360 shost_for_each_device(sdev, cb->host) {
361 if (sdev->channel >= myrb_logical_channel(cb->host))
363 err_table_offset = sdev->channel * MYRB_MAX_TARGETS
365 new = table + err_table_offset;
366 old = &old_table[err_table_offset];
367 if (new->parity_err == old->parity_err &&
368 new->soft_err == old->soft_err &&
369 new->hard_err == old->hard_err &&
370 new->misc_err == old->misc_err)
372 sdev_printk(KERN_CRIT, sdev,
373 "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
374 new->parity_err, new->soft_err,
375 new->hard_err, new->misc_err);
381 * myrb_get_ldev_info - retrieves the logical device table from the controller
383 * Executes a type 3 command and updates the logical device table.
385 * Return: command status
387 static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
389 unsigned short status;
390 int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
391 struct Scsi_Host *shost = cb->host;
393 status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
395 if (status != MYRB_STATUS_SUCCESS)
398 for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
399 struct myrb_ldev_info *old = NULL;
400 struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
401 struct scsi_device *sdev;
403 sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
406 if (new->state == MYRB_DEVICE_OFFLINE)
408 shost_printk(KERN_INFO, shost,
409 "Adding Logical Drive %d in state %s\n",
410 ldev_num, myrb_devstate_name(new->state));
411 scsi_add_device(shost, myrb_logical_channel(shost),
415 old = sdev->hostdata;
416 if (new->state != old->state)
417 shost_printk(KERN_INFO, shost,
418 "Logical Drive %d is now %s\n",
419 ldev_num, myrb_devstate_name(new->state));
420 if (new->wb_enabled != old->wb_enabled)
421 sdev_printk(KERN_INFO, sdev,
422 "Logical Drive is now WRITE %s\n",
423 (new->wb_enabled ? "BACK" : "THRU"));
424 memcpy(old, new, sizeof(*new));
425 scsi_device_put(sdev);
431 * myrb_get_rbld_progress - get rebuild progress information
433 * Executes a type 3 command and returns the rebuild progress information.
436 * Return: command status
438 static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
439 struct myrb_rbld_progress *rbld)
441 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
442 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
443 struct myrb_rbld_progress *rbld_buf;
444 dma_addr_t rbld_addr;
445 unsigned short status;
447 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
448 sizeof(struct myrb_rbld_progress),
449 &rbld_addr, GFP_KERNEL);
451 return MYRB_STATUS_RBLD_NOT_CHECKED;
453 myrb_reset_cmd(cmd_blk);
454 mbox->type3.id = MYRB_MCMD_TAG;
455 mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
456 mbox->type3.addr = rbld_addr;
457 status = myrb_exec_cmd(cb, cmd_blk);
459 memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
460 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
461 rbld_buf, rbld_addr);
466 * myrb_update_rbld_progress - updates the rebuild status
468 * Updates the rebuild status for the attached logical devices.
470 static void myrb_update_rbld_progress(struct myrb_hba *cb)
472 struct myrb_rbld_progress rbld_buf;
473 unsigned short status;
475 status = myrb_get_rbld_progress(cb, &rbld_buf);
476 if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
477 cb->last_rbld_status == MYRB_STATUS_SUCCESS)
478 status = MYRB_STATUS_RBLD_SUCCESS;
479 if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
480 unsigned int blocks_done =
481 rbld_buf.ldev_size - rbld_buf.blocks_left;
482 struct scsi_device *sdev;
484 sdev = scsi_device_lookup(cb->host,
485 myrb_logical_channel(cb->host),
486 rbld_buf.ldev_num, 0);
491 case MYRB_STATUS_SUCCESS:
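/*
 * Shift both block counts down before scaling so that the
 * multiplication by 100 cannot overflow 32 bits.
 */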
492 sdev_printk(KERN_INFO, sdev,
493 "Rebuild in Progress, %d%% completed\n",
494 (100 * (blocks_done >> 7))
495 / (rbld_buf.ldev_size >> 7));
497 case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
498 sdev_printk(KERN_INFO, sdev,
499 "Rebuild Failed due to Logical Drive Failure\n");
501 case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
502 sdev_printk(KERN_INFO, sdev,
503 "Rebuild Failed due to Bad Blocks on Other Drives\n");
505 case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
506 sdev_printk(KERN_INFO, sdev,
507 "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
509 case MYRB_STATUS_RBLD_SUCCESS:
510 sdev_printk(KERN_INFO, sdev,
511 "Rebuild Completed Successfully\n");
513 case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
514 sdev_printk(KERN_INFO, sdev,
515 "Rebuild Successfully Terminated\n");
520 scsi_device_put(sdev);
522 cb->last_rbld_status = status;
526 * myrb_get_cc_progress - retrieve the consistency check progress
528 * Executes a type 3 command and fetches the rebuild / consistency check progress.
531 static void myrb_get_cc_progress(struct myrb_hba *cb)
533 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
534 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
535 struct myrb_rbld_progress *rbld_buf;
536 dma_addr_t rbld_addr;
537 unsigned short status;
539 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
540 sizeof(struct myrb_rbld_progress),
541 &rbld_addr, GFP_KERNEL);
543 cb->need_cc_status = true;
546 myrb_reset_cmd(cmd_blk);
547 mbox->type3.id = MYRB_MCMD_TAG;
548 mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
549 mbox->type3.addr = rbld_addr;
550 status = myrb_exec_cmd(cb, cmd_blk);
551 if (status == MYRB_STATUS_SUCCESS) {
552 unsigned int ldev_num = rbld_buf->ldev_num;
553 unsigned int ldev_size = rbld_buf->ldev_size;
554 unsigned int blocks_done =
555 ldev_size - rbld_buf->blocks_left;
556 struct scsi_device *sdev;
558 sdev = scsi_device_lookup(cb->host,
559 myrb_logical_channel(cb->host),
562 sdev_printk(KERN_INFO, sdev,
563 "Consistency Check in Progress: %d%% completed\n",
564 (100 * (blocks_done >> 7))
566 scsi_device_put(sdev);
569 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
570 rbld_buf, rbld_addr);
574 * myrb_bgi_control - updates background initialisation status
576 * Executes a type 3B command and updates the background initialisation status
578 static void myrb_bgi_control(struct myrb_hba *cb)
580 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
581 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
582 struct myrb_bgi_status *bgi, *last_bgi;
584 struct scsi_device *sdev = NULL;
585 unsigned short status;
587 bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
588 &bgi_addr, GFP_KERNEL);
590 shost_printk(KERN_ERR, cb->host,
591 "Failed to allocate bgi memory\n");
594 myrb_reset_cmd(cmd_blk);
595 mbox->type3B.id = MYRB_DCMD_TAG;
596 mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
597 mbox->type3B.optype = 0x20;
598 mbox->type3B.addr = bgi_addr;
599 status = myrb_exec_cmd(cb, cmd_blk);
600 last_bgi = &cb->bgi_status;
601 sdev = scsi_device_lookup(cb->host,
602 myrb_logical_channel(cb->host),
605 case MYRB_STATUS_SUCCESS:
606 switch (bgi->status) {
607 case MYRB_BGI_INVALID:
609 case MYRB_BGI_STARTED:
612 sdev_printk(KERN_INFO, sdev,
613 "Background Initialization Started\n");
615 case MYRB_BGI_INPROGRESS:
618 if (bgi->blocks_done == last_bgi->blocks_done &&
619 bgi->ldev_num == last_bgi->ldev_num)
621 sdev_printk(KERN_INFO, sdev,
622 "Background Initialization in Progress: %d%% completed\n",
623 (100 * (bgi->blocks_done >> 7))
624 / (bgi->ldev_size >> 7));
626 case MYRB_BGI_SUSPENDED:
629 sdev_printk(KERN_INFO, sdev,
630 "Background Initialization Suspended\n");
632 case MYRB_BGI_CANCELLED:
635 sdev_printk(KERN_INFO, sdev,
636 "Background Initialization Cancelled\n");
639 memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
641 case MYRB_STATUS_BGI_SUCCESS:
642 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
643 sdev_printk(KERN_INFO, sdev,
644 "Background Initialization Completed Successfully\n");
645 cb->bgi_status.status = MYRB_BGI_INVALID;
647 case MYRB_STATUS_BGI_ABORTED:
648 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
649 sdev_printk(KERN_INFO, sdev,
650 "Background Initialization Aborted\n");
652 case MYRB_STATUS_NO_BGI_INPROGRESS:
653 cb->bgi_status.status = MYRB_BGI_INVALID;
657 scsi_device_put(sdev);
658 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
663 * myrb_hba_enquiry - updates the controller status
665 * Executes a DAC_V1_Enquiry command and updates the controller status.
667 * Return: command status
669 static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
671 struct myrb_enquiry old, *new;
672 unsigned short status;
674 memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
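/* Keep a copy of the previous enquiry data so that state changes can be detected below */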
676 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
677 if (status != MYRB_STATUS_SUCCESS)
681 if (new->ldev_count > old.ldev_count) {
682 int ldev_num = old.ldev_count - 1;
684 while (++ldev_num < new->ldev_count)
685 shost_printk(KERN_CRIT, cb->host,
686 "Logical Drive %d Now Exists\n",
689 if (new->ldev_count < old.ldev_count) {
690 int ldev_num = new->ldev_count - 1;
692 while (++ldev_num < old.ldev_count)
693 shost_printk(KERN_CRIT, cb->host,
694 "Logical Drive %d No Longer Exists\n",
697 if (new->status.deferred != old.status.deferred)
698 shost_printk(KERN_CRIT, cb->host,
699 "Deferred Write Error Flag is now %s\n",
700 (new->status.deferred ? "TRUE" : "FALSE"));
701 if (new->ev_seq != old.ev_seq) {
702 cb->new_ev_seq = new->ev_seq;
703 cb->need_err_info = true;
704 shost_printk(KERN_INFO, cb->host,
705 "Event log %d/%d (%d/%d) available\n",
706 cb->old_ev_seq, cb->new_ev_seq,
707 old.ev_seq, new->ev_seq);
709 if ((new->ldev_critical > 0 &&
710 new->ldev_critical != old.ldev_critical) ||
711 (new->ldev_offline > 0 &&
712 new->ldev_offline != old.ldev_offline) ||
713 (new->ldev_count != old.ldev_count)) {
714 shost_printk(KERN_INFO, cb->host,
715 "Logical drive count changed (%d/%d/%d)\n",
719 cb->need_ldev_info = true;
721 if (new->pdev_dead > 0 ||
722 new->pdev_dead != old.pdev_dead ||
723 time_after_eq(jiffies, cb->secondary_monitor_time
724 + MYRB_SECONDARY_MONITOR_INTERVAL)) {
725 cb->need_bgi_status = cb->bgi_status_supported;
726 cb->secondary_monitor_time = jiffies;
728 if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
729 new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
730 old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
731 old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
732 cb->need_rbld = true;
733 cb->rbld_first = (new->ldev_critical < old.ldev_critical);
735 if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
737 case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
738 shost_printk(KERN_INFO, cb->host,
739 "Consistency Check Completed Successfully\n");
741 case MYRB_STDBY_RBLD_IN_PROGRESS:
742 case MYRB_BG_RBLD_IN_PROGRESS:
744 case MYRB_BG_CHECK_IN_PROGRESS:
745 cb->need_cc_status = true;
747 case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
748 shost_printk(KERN_INFO, cb->host,
749 "Consistency Check Completed with Error\n");
751 case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
752 shost_printk(KERN_INFO, cb->host,
753 "Consistency Check Failed - Physical Device Failed\n");
755 case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
756 shost_printk(KERN_INFO, cb->host,
757 "Consistency Check Failed - Logical Drive Failed\n");
759 case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
760 shost_printk(KERN_INFO, cb->host,
761 "Consistency Check Failed - Other Causes\n");
763 case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
764 shost_printk(KERN_INFO, cb->host,
765 "Consistency Check Successfully Terminated\n");
768 else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
769 cb->need_cc_status = true;
771 return MYRB_STATUS_SUCCESS;
775 * myrb_set_pdev_state - sets the device state for a physical device
777 * Return: command status
779 static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
780 struct scsi_device *sdev, enum myrb_devstate state)
782 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
783 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
784 unsigned short status;
786 mutex_lock(&cb->dcmd_mutex);
787 mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
788 mbox->type3D.id = MYRB_DCMD_TAG;
789 mbox->type3D.channel = sdev->channel;
790 mbox->type3D.target = sdev->id;
791 mbox->type3D.state = state & 0x1F;
792 status = myrb_exec_cmd(cb, cmd_blk);
793 mutex_unlock(&cb->dcmd_mutex);
799 * myrb_enable_mmio - enables the Memory Mailbox Interface
801 * PD and P controller types have no memory mailbox, but still need the
802 * other dma mapped memory.
804 * Return: true on success, false otherwise.
806 static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
808 void __iomem *base = cb->io_base;
809 struct pci_dev *pdev = cb->pdev;
810 size_t err_table_size;
811 size_t ldev_info_size;
812 union myrb_cmd_mbox *cmd_mbox_mem;
813 struct myrb_stat_mbox *stat_mbox_mem;
814 union myrb_cmd_mbox mbox;
815 unsigned short status;
817 memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
819 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
820 dev_err(&pdev->dev, "DMA mask out of range\n");
824 cb->enquiry = dma_alloc_coherent(&pdev->dev,
825 sizeof(struct myrb_enquiry),
826 &cb->enquiry_addr, GFP_KERNEL);
830 err_table_size = sizeof(struct myrb_error_entry) *
831 MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
832 cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
833 &cb->err_table_addr, GFP_KERNEL);
837 ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
838 cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
839 &cb->ldev_info_addr, GFP_KERNEL);
840 if (!cb->ldev_info_buf)
844 * Skip mailbox initialisation for PD and P Controllers
849 /* These are the base addresses for the command memory mailbox array */
850 cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
851 cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
855 if (!cb->first_cmd_mbox)
858 cmd_mbox_mem = cb->first_cmd_mbox;
859 cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
860 cb->last_cmd_mbox = cmd_mbox_mem;
861 cb->next_cmd_mbox = cb->first_cmd_mbox;
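/* The previous-command pointers start out on the last two (still empty) slots of the ring */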
862 cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
863 cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
865 /* These are the base addresses for the status memory mailbox array */
866 cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
867 sizeof(struct myrb_stat_mbox);
868 cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
872 if (!cb->first_stat_mbox)
875 stat_mbox_mem = cb->first_stat_mbox;
876 stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
877 cb->last_stat_mbox = stat_mbox_mem;
878 cb->next_stat_mbox = cb->first_stat_mbox;
880 /* Enable the Memory Mailbox Interface. */
881 cb->dual_mode_interface = true;
882 mbox.typeX.opcode = 0x2B;
884 mbox.typeX.opcode2 = 0x14;
885 mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
886 mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
888 status = mmio_init_fn(pdev, base, &mbox);
889 if (status != MYRB_STATUS_SUCCESS) {
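/* Dual-mode setup failed; retry with the single-mode interface */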
890 cb->dual_mode_interface = false;
891 mbox.typeX.opcode2 = 0x10;
892 status = mmio_init_fn(pdev, base, &mbox);
893 if (status != MYRB_STATUS_SUCCESS) {
895 "Failed to enable mailbox, statux %02X\n",
904 * myrb_get_hba_config - reads the configuration information
906 * Reads the configuration information from the controller and
907 * initializes the controller structure.
909 * Return: 0 on success, errno otherwise
911 static int myrb_get_hba_config(struct myrb_hba *cb)
913 struct myrb_enquiry2 *enquiry2;
914 dma_addr_t enquiry2_addr;
915 struct myrb_config2 *config2;
916 dma_addr_t config2_addr;
917 struct Scsi_Host *shost = cb->host;
918 struct pci_dev *pdev = cb->pdev;
919 int pchan_max = 0, pchan_cur = 0;
920 unsigned short status;
921 int ret = -ENODEV, memsize = 0;
923 enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
924 &enquiry2_addr, GFP_KERNEL);
926 shost_printk(KERN_ERR, cb->host,
927 "Failed to allocate V1 enquiry2 memory\n");
930 config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
931 &config2_addr, GFP_KERNEL);
933 shost_printk(KERN_ERR, cb->host,
934 "Failed to allocate V1 config2 memory\n");
935 dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
936 enquiry2, enquiry2_addr);
939 mutex_lock(&cb->dma_mutex);
940 status = myrb_hba_enquiry(cb);
941 mutex_unlock(&cb->dma_mutex);
942 if (status != MYRB_STATUS_SUCCESS) {
943 shost_printk(KERN_WARNING, cb->host,
944 "Failed it issue V1 Enquiry\n");
948 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
949 if (status != MYRB_STATUS_SUCCESS) {
950 shost_printk(KERN_WARNING, cb->host,
951 "Failed to issue V1 Enquiry2\n");
955 status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
956 if (status != MYRB_STATUS_SUCCESS) {
957 shost_printk(KERN_WARNING, cb->host,
958 "Failed to issue ReadConfig2\n");
962 status = myrb_get_ldev_info(cb);
963 if (status != MYRB_STATUS_SUCCESS) {
964 shost_printk(KERN_WARNING, cb->host,
965 "Failed to get logical drive information\n");
970 * Initialize the Controller Model Name and Full Model Name fields.
972 switch (enquiry2->hw.sub_model) {
973 case DAC960_V1_P_PD_PU:
974 if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
975 strcpy(cb->model_name, "DAC960PU");
977 strcpy(cb->model_name, "DAC960PD");
980 strcpy(cb->model_name, "DAC960PL");
983 strcpy(cb->model_name, "DAC960PG");
986 strcpy(cb->model_name, "DAC960PJ");
989 strcpy(cb->model_name, "DAC960PR");
992 strcpy(cb->model_name, "DAC960PT");
995 strcpy(cb->model_name, "DAC960PTL0");
998 strcpy(cb->model_name, "DAC960PRL");
1000 case DAC960_V1_PTL1:
1001 strcpy(cb->model_name, "DAC960PTL1");
1003 case DAC960_V1_1164P:
1004 strcpy(cb->model_name, "eXtremeRAID 1100");
1007 shost_printk(KERN_WARNING, cb->host,
1008 "Unknown Model %X\n",
1009 enquiry2->hw.sub_model);
1013 * Initialize the Controller Firmware Version field and verify that it
1014 * is a supported firmware version.
1015 * The supported firmware versions are:
1017 * DAC1164P 5.06 and above
1018 * DAC960PTL/PRL/PJ/PG 4.06 and above
1019 * DAC960PU/PD/PL 3.51 and above
1020 * DAC960PU/PD/PL/P 2.73 and above
1022 #if defined(CONFIG_ALPHA)
1024 * DEC Alpha machines were often equipped with DAC960 cards that were
1025 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
1026 * the last custom FW revision to be released by DEC for these older
1027 * controllers, appears to work quite well with this driver.
1029 * Cards tested successfully were several versions each of the PD and
1030 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
1031 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
1032 * back of the board, of:
1034 * KZPSC: D040347 (1-channel) or D040348 (2-channel)
1035 * or D040349 (3-channel)
1036 * KZPAC: D040395 (1-channel) or D040396 (2-channel)
1037 * or D040397 (3-channel)
1039 # define FIRMWARE_27X "2.70"
1041 # define FIRMWARE_27X "2.73"
1044 if (enquiry2->fw.major_version == 0) {
1045 enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1046 enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1047 enquiry2->fw.firmware_type = '0';
1048 enquiry2->fw.turn_id = 0;
1050 snprintf(cb->fw_version, sizeof(cb->fw_version),
1052 enquiry2->fw.major_version,
1053 enquiry2->fw.minor_version,
1054 enquiry2->fw.firmware_type,
1055 enquiry2->fw.turn_id);
1056 if (!((enquiry2->fw.major_version == 5 &&
1057 enquiry2->fw.minor_version >= 6) ||
1058 (enquiry2->fw.major_version == 4 &&
1059 enquiry2->fw.minor_version >= 6) ||
1060 (enquiry2->fw.major_version == 3 &&
1061 enquiry2->fw.minor_version >= 51) ||
1062 (enquiry2->fw.major_version == 2 &&
1063 strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1064 shost_printk(KERN_WARNING, cb->host,
1065 "Firmware Version '%s' unsupported\n",
1070 * Initialize the Channels, Targets, Memory Size, and SAF-TE
1071 * Enclosure Management Enabled fields.
1073 switch (enquiry2->hw.model) {
1074 case MYRB_5_CHANNEL_BOARD:
1077 case MYRB_3_CHANNEL_BOARD:
1078 case MYRB_3_CHANNEL_ASIC_DAC:
1081 case MYRB_2_CHANNEL_BOARD:
1085 pchan_max = enquiry2->cfg_chan;
1088 pchan_cur = enquiry2->cur_chan;
1089 if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
1091 else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
1095 cb->ldev_block_size = enquiry2->ldev_block_size;
1096 shost->max_channel = pchan_cur;
1097 shost->max_id = enquiry2->max_targets;
1098 memsize = enquiry2->mem_size >> 20;
1099 cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1101 * Initialize the Controller Queue Depth, Driver Queue Depth,
1102 * Logical Drive Count, Maximum Blocks per Command, Controller
1103 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
1104 * The Driver Queue Depth must be at most one less than the
1105 * Controller Queue Depth to allow for an automatic drive
1106 * rebuild operation.
1108 shost->can_queue = cb->enquiry->max_tcq;
1109 if (shost->can_queue < 3)
1110 shost->can_queue = enquiry2->max_cmds;
1111 if (shost->can_queue < 3)
1112 /* Play safe and disable TCQ */
1113 shost->can_queue = 1;
1115 if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
1116 shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
1117 shost->max_sectors = enquiry2->max_sectors;
1118 shost->sg_tablesize = enquiry2->max_sge;
1119 if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
1120 shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
1122 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
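* The controller reports both sizes in device blocks; they are converted
* to kilobytes here.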
1124 cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1125 >> (10 - MYRB_BLKSIZE_BITS);
1126 cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1127 >> (10 - MYRB_BLKSIZE_BITS);
1128 /* Assume 255/63 translation */
1129 cb->ldev_geom_heads = 255;
1130 cb->ldev_geom_sectors = 63;
1131 if (config2->drive_geometry) {
1132 cb->ldev_geom_heads = 128;
1133 cb->ldev_geom_sectors = 32;
1137 * Initialize the Background Initialization Status.
1139 if ((cb->fw_version[0] == '4' &&
1140 strcmp(cb->fw_version, "4.08") >= 0) ||
1141 (cb->fw_version[0] == '5' &&
1142 strcmp(cb->fw_version, "5.08") >= 0)) {
1143 cb->bgi_status_supported = true;
1144 myrb_bgi_control(cb);
1146 cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1150 shost_printk(KERN_INFO, cb->host,
1151 "Configuring %s PCI RAID Controller\n", cb->model_name);
1152 shost_printk(KERN_INFO, cb->host,
1153 " Firmware Version: %s, Memory Size: %dMB\n",
1154 cb->fw_version, memsize);
1155 if (cb->io_addr == 0)
1156 shost_printk(KERN_INFO, cb->host,
1157 " I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
1158 (unsigned long)cb->pci_addr, cb->irq);
1160 shost_printk(KERN_INFO, cb->host,
1161 " I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
1162 (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1164 shost_printk(KERN_INFO, cb->host,
1165 " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
1166 cb->host->can_queue, cb->host->max_sectors);
1167 shost_printk(KERN_INFO, cb->host,
1168 " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
1169 cb->host->can_queue, cb->host->sg_tablesize,
1170 MYRB_SCATTER_GATHER_LIMIT);
1171 shost_printk(KERN_INFO, cb->host,
1172 " Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
1173 cb->stripe_size, cb->segment_size,
1174 cb->ldev_geom_heads, cb->ldev_geom_sectors,
1176 " SAF-TE Enclosure Management Enabled" : "");
1177 shost_printk(KERN_INFO, cb->host,
1178 " Physical: %d/%d channels %d/%d/%d devices\n",
1179 pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1182 shost_printk(KERN_INFO, cb->host,
1183 " Logical: 1/1 channels, %d/%d disks\n",
1184 cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
1187 dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
1188 enquiry2, enquiry2_addr);
1189 dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
1190 config2, config2_addr);
1196 * myrb_unmap - unmaps controller structures
1198 static void myrb_unmap(struct myrb_hba *cb)
1200 if (cb->ldev_info_buf) {
1201 size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
1203 dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1204 cb->ldev_info_buf, cb->ldev_info_addr);
1205 cb->ldev_info_buf = NULL;
1207 if (cb->err_table) {
1208 size_t err_table_size = sizeof(struct myrb_error_entry) *
1209 MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
1210 dma_free_coherent(&cb->pdev->dev, err_table_size,
1211 cb->err_table, cb->err_table_addr);
1212 cb->err_table = NULL;
1215 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1216 cb->enquiry, cb->enquiry_addr);
1219 if (cb->first_stat_mbox) {
1220 dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1221 cb->first_stat_mbox, cb->stat_mbox_addr);
1222 cb->first_stat_mbox = NULL;
1224 if (cb->first_cmd_mbox) {
1225 dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1226 cb->first_cmd_mbox, cb->cmd_mbox_addr);
1227 cb->first_cmd_mbox = NULL;
1232 * myrb_cleanup - cleanup controller structures
1234 static void myrb_cleanup(struct myrb_hba *cb)
1236 struct pci_dev *pdev = cb->pdev;
1238 /* Free the memory mailbox, status, and related structures */
1241 if (cb->mmio_base) {
1242 if (cb->disable_intr)
1243 cb->disable_intr(cb->io_base);
1244 iounmap(cb->mmio_base);
1247 free_irq(cb->irq, cb);
1249 release_region(cb->io_addr, 0x80);
1250 pci_set_drvdata(pdev, NULL);
1251 pci_disable_device(pdev);
1252 scsi_host_put(cb->host);
1255 static int myrb_host_reset(struct scsi_cmnd *scmd)
1257 struct Scsi_Host *shost = scmd->device->host;
1258 struct myrb_hba *cb = shost_priv(shost);
1260 cb->reset(cb->io_base);
1264 static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
1265 struct scsi_cmnd *scmd)
1267 struct request *rq = scsi_cmd_to_rq(scmd);
1268 struct myrb_hba *cb = shost_priv(shost);
1269 struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1270 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1271 struct myrb_dcdb *dcdb;
1272 dma_addr_t dcdb_addr;
1273 struct scsi_device *sdev = scmd->device;
1274 struct scatterlist *sgl;
1275 unsigned long flags;
1278 myrb_reset_cmd(cmd_blk);
1279 dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
1281 return SCSI_MLQUEUE_HOST_BUSY;
1282 nsge = scsi_dma_map(scmd);
1284 dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
1285 scmd->result = (DID_ERROR << 16);
1290 mbox->type3.opcode = MYRB_CMD_DCDB;
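/* Offset the block layer tag so the command id does not collide with the driver's reserved internal tags */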
1291 mbox->type3.id = rq->tag + 3;
1292 mbox->type3.addr = dcdb_addr;
1293 dcdb->channel = sdev->channel;
1294 dcdb->target = sdev->id;
1295 switch (scmd->sc_data_direction) {
1297 dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
1300 dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
1302 case DMA_FROM_DEVICE:
1303 dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
1306 dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
1309 dcdb->early_status = false;
1310 if (rq->timeout <= 10)
1311 dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
1312 else if (rq->timeout <= 60)
1313 dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
1314 else if (rq->timeout <= 600)
1315 dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
1317 dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
1318 dcdb->no_autosense = false;
1319 dcdb->allow_disconnect = true;
1320 sgl = scsi_sglist(scmd);
1321 dcdb->dma_addr = sg_dma_address(sgl);
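/* The DCDB transfer length is split into a 16-bit low part and a 4-bit high part */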
1322 if (sg_dma_len(sgl) > USHRT_MAX) {
1323 dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
1324 dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
1326 dcdb->xfer_len_lo = sg_dma_len(sgl);
1327 dcdb->xfer_len_hi4 = 0;
1329 dcdb->cdb_len = scmd->cmd_len;
1330 dcdb->sense_len = sizeof(dcdb->sense);
1331 memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
1333 spin_lock_irqsave(&cb->queue_lock, flags);
1334 cb->qcmd(cb, cmd_blk);
1335 spin_unlock_irqrestore(&cb->queue_lock, flags);
1339 static void myrb_inquiry(struct myrb_hba *cb,
1340 struct scsi_cmnd *scmd)
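/* Template INQUIRY data: vendor "MYLEX", with the model name and firmware version patched in below */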
1342 unsigned char inq[36] = {
1343 0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
1344 0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
1345 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1346 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1347 0x20, 0x20, 0x20, 0x20,
1350 if (cb->bus_width > 16)
1352 if (cb->bus_width > 8)
1354 memcpy(&inq[16], cb->model_name, 16);
1355 memcpy(&inq[32], cb->fw_version, 1);
1356 memcpy(&inq[33], &cb->fw_version[2], 2);
1357 memcpy(&inq[35], &cb->fw_version[7], 1);
1359 scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
1363 myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1364 struct myrb_ldev_info *ldev_info)
1366 unsigned char modes[32], *mode_pg;
1370 dbd = (scmd->cmnd[1] & 0x08) == 0x08;
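/*
 * Build a MODE SENSE(6) response carrying the caching mode page,
 * preceded by a block descriptor unless DBD was requested.
 */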
1373 mode_pg = &modes[4];
1376 mode_pg = &modes[12];
1378 memset(modes, 0, sizeof(modes));
1379 modes[0] = mode_len - 1;
1381 unsigned char *block_desc = &modes[4];
1384 put_unaligned_be32(ldev_info->size, &block_desc[0]);
1385 put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
1389 if (ldev_info->wb_enabled)
1391 if (cb->segment_size) {
1393 put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1396 scsi_sg_copy_from_buffer(scmd, modes, mode_len);
1399 static void myrb_request_sense(struct myrb_hba *cb,
1400 struct scsi_cmnd *scmd)
1402 scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
1403 scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
1404 SCSI_SENSE_BUFFERSIZE);
1407 static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1408 struct myrb_ldev_info *ldev_info)
1410 unsigned char data[8];
1412 dev_dbg(&scmd->device->sdev_gendev,
1413 "Capacity %u, blocksize %u\n",
1414 ldev_info->size, cb->ldev_block_size);
1415 put_unaligned_be32(ldev_info->size - 1, &data[0]);
1416 put_unaligned_be32(cb->ldev_block_size, &data[4]);
1417 scsi_sg_copy_from_buffer(scmd, data, 8);
1420 static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1421 struct scsi_cmnd *scmd)
1423 struct myrb_hba *cb = shost_priv(shost);
1424 struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1425 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1426 struct myrb_ldev_info *ldev_info;
1427 struct scsi_device *sdev = scmd->device;
1428 struct scatterlist *sgl;
1429 unsigned long flags;
1434 ldev_info = sdev->hostdata;
1435 if (!ldev_info || (ldev_info->state != MYRB_DEVICE_ONLINE &&
1436 ldev_info->state != MYRB_DEVICE_WO)) {
1437 dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
1438 sdev->id, ldev_info ? ldev_info->state : 0xff);
1439 scmd->result = (DID_BAD_TARGET << 16);
1443 switch (scmd->cmnd[0]) {
1444 case TEST_UNIT_READY:
1445 scmd->result = (DID_OK << 16);
1449 if (scmd->cmnd[1] & 1) {
1450 /* Illegal request, invalid field in CDB */
1451 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1453 myrb_inquiry(cb, scmd);
1454 scmd->result = (DID_OK << 16);
1458 case SYNCHRONIZE_CACHE:
1459 scmd->result = (DID_OK << 16);
1463 if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1464 (scmd->cmnd[2] & 0x3F) != 0x08) {
1465 /* Illegal request, invalid field in CDB */
1466 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1468 myrb_mode_sense(cb, scmd, ldev_info);
1469 scmd->result = (DID_OK << 16);
1474 if ((scmd->cmnd[1] & 1) ||
1475 (scmd->cmnd[8] & 1)) {
1476 /* Illegal request, invalid field in CDB */
1477 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1481 lba = get_unaligned_be32(&scmd->cmnd[2]);
1483 /* Illegal request, invalid field in CDB */
1484 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1488 myrb_read_capacity(cb, scmd, ldev_info);
1492 myrb_request_sense(cb, scmd);
1493 scmd->result = (DID_OK << 16);
1495 case SEND_DIAGNOSTIC:
1496 if (scmd->cmnd[1] != 0x04) {
1497 /* Illegal request, invalid field in CDB */
1498 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1500 /* Assume good status */
1501 scmd->result = (DID_OK << 16);
1506 if (ldev_info->state == MYRB_DEVICE_WO) {
1507 /* Data protect, attempt to read invalid data */
1508 scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1514 lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1515 (scmd->cmnd[2] << 8) |
1517 block_cnt = scmd->cmnd[4];
1520 if (ldev_info->state == MYRB_DEVICE_WO) {
1521 /* Data protect, attempt to read invalid data */
1522 scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1528 case VERIFY: /* 0x2F */
1529 case WRITE_VERIFY: /* 0x2E */
1530 lba = get_unaligned_be32(&scmd->cmnd[2]);
1531 block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
1534 if (ldev_info->state == MYRB_DEVICE_WO) {
1535 /* Data protect, attempt to read invalid data */
1536 scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1542 case VERIFY_12: /* 0xAF */
1543 case WRITE_VERIFY_12: /* 0xAE */
1544 lba = get_unaligned_be32(&scmd->cmnd[2]);
1545 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1548 /* Illegal request, invalid opcode */
1549 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
1554 myrb_reset_cmd(cmd_blk);
1555 mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
1556 if (scmd->sc_data_direction == DMA_NONE)
1558 nsge = scsi_dma_map(scmd);
1560 sgl = scsi_sglist(scmd);
1561 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1562 mbox->type5.opcode = MYRB_CMD_READ;
1564 mbox->type5.opcode = MYRB_CMD_WRITE;
1566 mbox->type5.ld.xfer_len = block_cnt;
1567 mbox->type5.ld.ldev_num = sdev->id;
1568 mbox->type5.lba = lba;
1569 mbox->type5.addr = (u32)sg_dma_address(sgl);
1571 struct myrb_sge *hw_sgl;
1572 dma_addr_t hw_sgl_addr;
1575 hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1577 return SCSI_MLQUEUE_HOST_BUSY;
1579 cmd_blk->sgl = hw_sgl;
1580 cmd_blk->sgl_addr = hw_sgl_addr;
1582 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1583 mbox->type5.opcode = MYRB_CMD_READ_SG;
1585 mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1587 mbox->type5.ld.xfer_len = block_cnt;
1588 mbox->type5.ld.ldev_num = sdev->id;
1589 mbox->type5.lba = lba;
1590 mbox->type5.addr = hw_sgl_addr;
1591 mbox->type5.sg_count = nsge;
1593 scsi_for_each_sg(scmd, sgl, nsge, i) {
1594 hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1595 hw_sgl->sge_count = (u32)sg_dma_len(sgl);
1600 spin_lock_irqsave(&cb->queue_lock, flags);
1601 cb->qcmd(cb, cmd_blk);
1602 spin_unlock_irqrestore(&cb->queue_lock, flags);
1607 static int myrb_queuecommand(struct Scsi_Host *shost,
1608 struct scsi_cmnd *scmd)
1610 struct scsi_device *sdev = scmd->device;
1612 if (sdev->channel > myrb_logical_channel(shost)) {
1613 scmd->result = (DID_BAD_TARGET << 16);
1617 if (sdev->channel == myrb_logical_channel(shost))
1618 return myrb_ldev_queuecommand(shost, scmd);
1620 return myrb_pthru_queuecommand(shost, scmd);
1623 static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1625 struct myrb_hba *cb = shost_priv(sdev->host);
1626 struct myrb_ldev_info *ldev_info;
1627 unsigned short ldev_num = sdev->id;
1628 enum raid_level level;
1630 ldev_info = cb->ldev_info_buf + ldev_num;
1634 sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1635 if (!sdev->hostdata)
1637 dev_dbg(&sdev->sdev_gendev,
1638 "slave alloc ldev %d state %x\n",
1639 ldev_num, ldev_info->state);
1640 memcpy(sdev->hostdata, ldev_info,
1641 sizeof(*ldev_info));
1642 switch (ldev_info->raid_level) {
1643 case MYRB_RAID_LEVEL0:
1644 level = RAID_LEVEL_LINEAR;
1646 case MYRB_RAID_LEVEL1:
1647 level = RAID_LEVEL_1;
1649 case MYRB_RAID_LEVEL3:
1650 level = RAID_LEVEL_3;
1652 case MYRB_RAID_LEVEL5:
1653 level = RAID_LEVEL_5;
1655 case MYRB_RAID_LEVEL6:
1656 level = RAID_LEVEL_6;
1658 case MYRB_RAID_JBOD:
1659 level = RAID_LEVEL_JBOD;
1662 level = RAID_LEVEL_UNKNOWN;
1665 raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
1669 static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1671 struct myrb_hba *cb = shost_priv(sdev->host);
1672 struct myrb_pdev_state *pdev_info;
1673 unsigned short status;
1675 if (sdev->id > MYRB_MAX_TARGETS)
1678 pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
1682 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1684 if (status != MYRB_STATUS_SUCCESS) {
1685 dev_dbg(&sdev->sdev_gendev,
1686 "Failed to get device state, status %x\n",
1691 if (!pdev_info->present) {
1692 dev_dbg(&sdev->sdev_gendev,
1693 "device not present, skip\n");
1697 dev_dbg(&sdev->sdev_gendev,
1698 "slave alloc pdev %d:%d state %x\n",
1699 sdev->channel, sdev->id, pdev_info->state);
1700 sdev->hostdata = pdev_info;
1705 static int myrb_slave_alloc(struct scsi_device *sdev)
1707 if (sdev->channel > myrb_logical_channel(sdev->host))
1713 if (sdev->channel == myrb_logical_channel(sdev->host))
1714 return myrb_ldev_slave_alloc(sdev);
1716 return myrb_pdev_slave_alloc(sdev);
1719 static int myrb_slave_configure(struct scsi_device *sdev)
1721 struct myrb_ldev_info *ldev_info;
1723 if (sdev->channel > myrb_logical_channel(sdev->host))
1726 if (sdev->channel < myrb_logical_channel(sdev->host)) {
1727 sdev->no_uld_attach = 1;
1733 ldev_info = sdev->hostdata;
1736 if (ldev_info->state != MYRB_DEVICE_ONLINE)
1737 sdev_printk(KERN_INFO, sdev,
1738 "Logical drive is %s\n",
1739 myrb_devstate_name(ldev_info->state));
1741 sdev->tagged_supported = 1;
1745 static void myrb_slave_destroy(struct scsi_device *sdev)
1747 kfree(sdev->hostdata);
1750 static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1751 sector_t capacity, int geom[])
1753 struct myrb_hba *cb = shost_priv(sdev->host);
1755 geom[0] = cb->ldev_geom_heads;
1756 geom[1] = cb->ldev_geom_sectors;
1757 sector_div(capacity, geom[0] * geom[1]);
geom[2] = capacity; /* cylinders = capacity / (heads * sectors) */
1762 static ssize_t raid_state_show(struct device *dev,
1763 struct device_attribute *attr, char *buf)
1765 struct scsi_device *sdev = to_scsi_device(dev);
1766 struct myrb_hba *cb = shost_priv(sdev->host);
1769 if (!sdev->hostdata)
1770 return snprintf(buf, 16, "Unknown\n");
1772 if (sdev->channel == myrb_logical_channel(sdev->host)) {
1773 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1776 name = myrb_devstate_name(ldev_info->state);
1778 ret = snprintf(buf, 32, "%s\n", name);
1780 ret = snprintf(buf, 32, "Invalid (%02X)\n",
1783 struct myrb_pdev_state *pdev_info = sdev->hostdata;
1784 unsigned short status;
1787 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1789 if (status != MYRB_STATUS_SUCCESS)
1790 sdev_printk(KERN_INFO, sdev,
1791 "Failed to get device state, status %x\n",
1794 if (!pdev_info->present)
1797 name = myrb_devstate_name(pdev_info->state);
1799 ret = snprintf(buf, 32, "%s\n", name);
1801 ret = snprintf(buf, 32, "Invalid (%02X)\n",
1807 static ssize_t raid_state_store(struct device *dev,
1808 struct device_attribute *attr, const char *buf, size_t count)
1810 struct scsi_device *sdev = to_scsi_device(dev);
1811 struct myrb_hba *cb = shost_priv(sdev->host);
1812 struct myrb_pdev_state *pdev_info;
1813 enum myrb_devstate new_state;
1814 unsigned short status;
1816 if (!strncmp(buf, "kill", 4) ||
1817 !strncmp(buf, "offline", 7))
1818 new_state = MYRB_DEVICE_DEAD;
1819 else if (!strncmp(buf, "online", 6))
1820 new_state = MYRB_DEVICE_ONLINE;
1821 else if (!strncmp(buf, "standby", 7))
1822 new_state = MYRB_DEVICE_STANDBY;
1826 pdev_info = sdev->hostdata;
1828 sdev_printk(KERN_INFO, sdev,
1829 "Failed - no physical device information\n");
1832 if (!pdev_info->present) {
1833 sdev_printk(KERN_INFO, sdev,
1834 "Failed - device not present\n");
1838 if (pdev_info->state == new_state)
1841 status = myrb_set_pdev_state(cb, sdev, new_state);
1843 case MYRB_STATUS_SUCCESS:
1845 case MYRB_STATUS_START_DEVICE_FAILED:
1846 sdev_printk(KERN_INFO, sdev,
1847 "Failed - Unable to Start Device\n");
1850 case MYRB_STATUS_NO_DEVICE:
1851 sdev_printk(KERN_INFO, sdev,
1852 "Failed - No Device at Address\n");
1855 case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1856 sdev_printk(KERN_INFO, sdev,
1857 "Failed - Invalid Channel or Target or Modifier\n");
1860 case MYRB_STATUS_CHANNEL_BUSY:
1861 sdev_printk(KERN_INFO, sdev,
1862 "Failed - Channel Busy\n");
1866 sdev_printk(KERN_INFO, sdev,
1867 "Failed - Unexpected Status %04X\n", status);
1873 static DEVICE_ATTR_RW(raid_state);
1875 static ssize_t raid_level_show(struct device *dev,
1876 struct device_attribute *attr, char *buf)
1878 struct scsi_device *sdev = to_scsi_device(dev);
1880 if (sdev->channel == myrb_logical_channel(sdev->host)) {
1881 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1887 name = myrb_raidlevel_name(ldev_info->raid_level);
1889 return snprintf(buf, 32, "Invalid (%02X)\n",
1891 return snprintf(buf, 32, "%s\n", name);
1893 return snprintf(buf, 32, "Physical Drive\n");
1895 static DEVICE_ATTR_RO(raid_level);
1897 static ssize_t rebuild_show(struct device *dev,
1898 struct device_attribute *attr, char *buf)
1900 struct scsi_device *sdev = to_scsi_device(dev);
1901 struct myrb_hba *cb = shost_priv(sdev->host);
1902 struct myrb_rbld_progress rbld_buf;
1903 unsigned char status;
1905 if (sdev->channel < myrb_logical_channel(sdev->host))
1906 return snprintf(buf, 32, "physical device - not rebuilding\n");
1908 status = myrb_get_rbld_progress(cb, &rbld_buf);
1910 if (rbld_buf.ldev_num != sdev->id ||
1911 status != MYRB_STATUS_SUCCESS)
1912 return snprintf(buf, 32, "not rebuilding\n");
1914 return snprintf(buf, 32, "rebuilding block %u of %u\n",
1915 rbld_buf.ldev_size - rbld_buf.blocks_left,
1916 rbld_buf.ldev_size);
1919 static ssize_t rebuild_store(struct device *dev,
1920 struct device_attribute *attr, const char *buf, size_t count)
1922 struct scsi_device *sdev = to_scsi_device(dev);
1923 struct myrb_hba *cb = shost_priv(sdev->host);
1924 struct myrb_cmdblk *cmd_blk;
1925 union myrb_cmd_mbox *mbox;
1926 unsigned short status;
1930 rc = kstrtoint(buf, 0, &start);
1934 if (sdev->channel >= myrb_logical_channel(sdev->host))
1937 status = myrb_get_rbld_progress(cb, NULL);
1939 if (status == MYRB_STATUS_SUCCESS) {
1940 sdev_printk(KERN_INFO, sdev,
1941 "Rebuild Not Initiated; already in progress\n");
1944 mutex_lock(&cb->dcmd_mutex);
1945 cmd_blk = &cb->dcmd_blk;
1946 myrb_reset_cmd(cmd_blk);
1947 mbox = &cmd_blk->mbox;
1948 mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
1949 mbox->type3D.id = MYRB_DCMD_TAG;
1950 mbox->type3D.channel = sdev->channel;
1951 mbox->type3D.target = sdev->id;
1952 status = myrb_exec_cmd(cb, cmd_blk);
1953 mutex_unlock(&cb->dcmd_mutex);
1955 struct pci_dev *pdev = cb->pdev;
1956 unsigned char *rate;
1957 dma_addr_t rate_addr;
1959 if (status != MYRB_STATUS_SUCCESS) {
1960 sdev_printk(KERN_INFO, sdev,
1961 "Rebuild Not Cancelled; not in progress\n");
1965 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
1966 &rate_addr, GFP_KERNEL);
1968 sdev_printk(KERN_INFO, sdev,
1969 "Cancellation of Rebuild Failed - Out of Memory\n");
1972 mutex_lock(&cb->dcmd_mutex);
1973 cmd_blk = &cb->dcmd_blk;
1974 myrb_reset_cmd(cmd_blk);
1975 mbox = &cmd_blk->mbox;
1976 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
1977 mbox->type3R.id = MYRB_DCMD_TAG;
1978 mbox->type3R.rbld_rate = 0xFF;
1979 mbox->type3R.addr = rate_addr;
1980 status = myrb_exec_cmd(cb, cmd_blk);
1981 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
1982 mutex_unlock(&cb->dcmd_mutex);
1984 if (status == MYRB_STATUS_SUCCESS) {
1985 sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
1986 start ? "Initiated" : "Cancelled");
1990 sdev_printk(KERN_INFO, sdev,
1991 "Rebuild Not Cancelled, status 0x%x\n",
1997 case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
1998 msg = "Attempt to Rebuild Online or Unresponsive Drive";
2000 case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2001 msg = "New Disk Failed During Rebuild";
2003 case MYRB_STATUS_INVALID_ADDRESS:
2004 msg = "Invalid Device Address";
2006 case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2007 msg = "Already in Progress";
2014 sdev_printk(KERN_INFO, sdev,
2015 "Rebuild Failed - %s\n", msg);
2017 sdev_printk(KERN_INFO, sdev,
2018 "Rebuild Failed, status 0x%x\n", status);
2022 static DEVICE_ATTR_RW(rebuild);
2024 static ssize_t consistency_check_store(struct device *dev,
2025 struct device_attribute *attr, const char *buf, size_t count)
2027 struct scsi_device *sdev = to_scsi_device(dev);
2028 struct myrb_hba *cb = shost_priv(sdev->host);
2029 struct myrb_rbld_progress rbld_buf;
2030 struct myrb_cmdblk *cmd_blk;
2031 union myrb_cmd_mbox *mbox;
2032 unsigned short ldev_num = 0xFFFF;
2033 unsigned short status;
2037 rc = kstrtoint(buf, 0, &start);
2041 if (sdev->channel < myrb_logical_channel(sdev->host))
2044 status = myrb_get_rbld_progress(cb, &rbld_buf);
2046 if (status == MYRB_STATUS_SUCCESS) {
2047 sdev_printk(KERN_INFO, sdev,
2048 "Check Consistency Not Initiated; already in progress\n");
2051 mutex_lock(&cb->dcmd_mutex);
2052 cmd_blk = &cb->dcmd_blk;
2053 myrb_reset_cmd(cmd_blk);
2054 mbox = &cmd_blk->mbox;
2055 mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2056 mbox->type3C.id = MYRB_DCMD_TAG;
2057 mbox->type3C.ldev_num = sdev->id;
2058 mbox->type3C.auto_restore = true;
2060 status = myrb_exec_cmd(cb, cmd_blk);
2061 mutex_unlock(&cb->dcmd_mutex);
2063 struct pci_dev *pdev = cb->pdev;
2064 unsigned char *rate;
2065 dma_addr_t rate_addr;
2067 if (ldev_num != sdev->id) {
2068 sdev_printk(KERN_INFO, sdev,
2069 "Check Consistency Not Cancelled; not in progress\n");
2072 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2073 &rate_addr, GFP_KERNEL);
2075 sdev_printk(KERN_INFO, sdev,
2076 "Cancellation of Check Consistency Failed - Out of Memory\n");
2079 mutex_lock(&cb->dcmd_mutex);
2080 cmd_blk = &cb->dcmd_blk;
2081 myrb_reset_cmd(cmd_blk);
2082 mbox = &cmd_blk->mbox;
2083 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2084 mbox->type3R.id = MYRB_DCMD_TAG;
2085 mbox->type3R.rbld_rate = 0xFF;
2086 mbox->type3R.addr = rate_addr;
2087 status = myrb_exec_cmd(cb, cmd_blk);
2088 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2089 mutex_unlock(&cb->dcmd_mutex);
2091 if (status == MYRB_STATUS_SUCCESS) {
2092 sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2093 start ? "Initiated" : "Cancelled");
2097 sdev_printk(KERN_INFO, sdev,
2098 "Check Consistency Not Cancelled, status 0x%x\n",
2104 case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2105 msg = "Dependent Physical Device is DEAD";
2107 case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2108 msg = "New Disk Failed During Rebuild";
2110 case MYRB_STATUS_INVALID_ADDRESS:
2111 msg = "Invalid or Nonredundant Logical Drive";
2113 case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2114 msg = "Already in Progress";
2121 sdev_printk(KERN_INFO, sdev,
2122 "Check Consistency Failed - %s\n", msg);
2124 sdev_printk(KERN_INFO, sdev,
2125 "Check Consistency Failed, status 0x%x\n", status);
2130 static ssize_t consistency_check_show(struct device *dev,
2131 struct device_attribute *attr, char *buf)
2133 return rebuild_show(dev, attr, buf);
2135 static DEVICE_ATTR_RW(consistency_check);
2137 static ssize_t ctlr_num_show(struct device *dev,
2138 struct device_attribute *attr, char *buf)
2140 struct Scsi_Host *shost = class_to_shost(dev);
2141 struct myrb_hba *cb = shost_priv(shost);
2143 return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2145 static DEVICE_ATTR_RO(ctlr_num);
2147 static ssize_t firmware_show(struct device *dev,
2148 struct device_attribute *attr, char *buf)
2150 struct Scsi_Host *shost = class_to_shost(dev);
2151 struct myrb_hba *cb = shost_priv(shost);
2153 return snprintf(buf, 16, "%s\n", cb->fw_version);
2155 static DEVICE_ATTR_RO(firmware);
2157 static ssize_t model_show(struct device *dev,
2158 struct device_attribute *attr, char *buf)
2160 struct Scsi_Host *shost = class_to_shost(dev);
2161 struct myrb_hba *cb = shost_priv(shost);
2163 return snprintf(buf, 16, "%s\n", cb->model_name);
2165 static DEVICE_ATTR_RO(model);
2167 static ssize_t flush_cache_store(struct device *dev,
2168 struct device_attribute *attr, const char *buf, size_t count)
2170 struct Scsi_Host *shost = class_to_shost(dev);
2171 struct myrb_hba *cb = shost_priv(shost);
2172 unsigned short status;
2174 status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2175 if (status == MYRB_STATUS_SUCCESS) {
2176 shost_printk(KERN_INFO, shost,
2177 "Cache Flush Completed\n");
2180 shost_printk(KERN_INFO, shost,
2181 "Cache Flush Failed, status %x\n", status);
2184 static DEVICE_ATTR_WO(flush_cache);
2186 static struct attribute *myrb_sdev_attrs[] = {
2187 &dev_attr_rebuild.attr,
2188 &dev_attr_consistency_check.attr,
2189 &dev_attr_raid_state.attr,
2190 &dev_attr_raid_level.attr,
2194 ATTRIBUTE_GROUPS(myrb_sdev);
2196 static struct attribute *myrb_shost_attrs[] = {
2197 &dev_attr_ctlr_num.attr,
2198 &dev_attr_model.attr,
2199 &dev_attr_firmware.attr,
2200 &dev_attr_flush_cache.attr,
2204 ATTRIBUTE_GROUPS(myrb_shost);
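/*
 * Illustrative use of the host attributes defined above; the hostN
 * path is an example, not taken from this driver:
 *
 *   cat /sys/class/scsi_host/hostN/ctlr_num
 *   cat /sys/class/scsi_host/hostN/model
 *   cat /sys/class/scsi_host/hostN/firmware
 *   echo 1 > /sys/class/scsi_host/hostN/flush_cache
 */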
2206 static struct scsi_host_template myrb_template = {
2207 .module = THIS_MODULE,
2209 .proc_name = "myrb",
2210 .queuecommand = myrb_queuecommand,
2211 .eh_host_reset_handler = myrb_host_reset,
2212 .slave_alloc = myrb_slave_alloc,
2213 .slave_configure = myrb_slave_configure,
2214 .slave_destroy = myrb_slave_destroy,
2215 .bios_param = myrb_biosparam,
2216 .cmd_size = sizeof(struct myrb_cmdblk),
2217 .shost_groups = myrb_shost_groups,
2218 .sdev_groups = myrb_sdev_groups,
2223 * myrb_is_raid - return boolean indicating device is raid volume
2224 * @dev: the device struct object
2226 static int myrb_is_raid(struct device *dev)
2228 struct scsi_device *sdev = to_scsi_device(dev);
2230 return sdev->channel == myrb_logical_channel(sdev->host);
2234 * myrb_get_resync - get raid volume resync percent complete
2235 * @dev: the device struct object
2237 static void myrb_get_resync(struct device *dev)
2239 struct scsi_device *sdev = to_scsi_device(dev);
2240 struct myrb_hba *cb = shost_priv(sdev->host);
2241 struct myrb_rbld_progress rbld_buf;
2242 unsigned int percent_complete = 0;
2243 unsigned short status;
2244 unsigned int ldev_size = 0, remaining = 0;
2246 if (sdev->channel < myrb_logical_channel(sdev->host))
2248 status = myrb_get_rbld_progress(cb, &rbld_buf);
2249 if (status == MYRB_STATUS_SUCCESS) {
2250 if (rbld_buf.ldev_num == sdev->id) {
2251 ldev_size = rbld_buf.ldev_size;
2252 remaining = rbld_buf.blocks_left;
2255 if (remaining && ldev_size)
2256 percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2257 raid_set_resync(myrb_raid_template, dev, percent_complete);
2261 * myrb_get_state - get raid volume status
2262 * @dev: the device struct object
2264 static void myrb_get_state(struct device *dev)
2266 struct scsi_device *sdev = to_scsi_device(dev);
2267 struct myrb_hba *cb = shost_priv(sdev->host);
2268 struct myrb_ldev_info *ldev_info = sdev->hostdata;
2269 enum raid_state state = RAID_STATE_UNKNOWN;
2270 unsigned short status;
2272 if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2273 state = RAID_STATE_UNKNOWN;
2275 status = myrb_get_rbld_progress(cb, NULL);
2276 if (status == MYRB_STATUS_SUCCESS)
2277 state = RAID_STATE_RESYNCING;
2279 switch (ldev_info->state) {
2280 case MYRB_DEVICE_ONLINE:
2281 state = RAID_STATE_ACTIVE;
2283 case MYRB_DEVICE_WO:
2284 case MYRB_DEVICE_CRITICAL:
2285 state = RAID_STATE_DEGRADED;
2288 state = RAID_STATE_OFFLINE;
2292 raid_set_state(myrb_raid_template, dev, state);
2295 static struct raid_function_template myrb_raid_functions = {
2296 .cookie = &myrb_template,
2297 .is_raid = myrb_is_raid,
2298 .get_resync = myrb_get_resync,
2299 .get_state = myrb_get_state,
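/*
 * myrb_handle_scsi - complete a SCSI command
 *
 * Unmaps the data buffer, releases any DCDB and scatter/gather pool
 * entries and translates the controller status into a SCSI result,
 * building sense data where required.
 */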
2302 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2303 struct scsi_cmnd *scmd)
2305 unsigned short status;
2310 scsi_dma_unmap(scmd);
2312 if (cmd_blk->dcdb) {
2313 memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2314 dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2315 cmd_blk->dcdb_addr);
2316 cmd_blk->dcdb = NULL;
2319 dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2320 cmd_blk->sgl = NULL;
2321 cmd_blk->sgl_addr = 0;
2323 status = cmd_blk->status;
2325 case MYRB_STATUS_SUCCESS:
2326 case MYRB_STATUS_DEVICE_BUSY:
2327 scmd->result = (DID_OK << 16) | status;
2329 case MYRB_STATUS_BAD_DATA:
2330 dev_dbg(&scmd->device->sdev_gendev,
2331 "Bad Data Encountered\n");
2332 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2333 /* Unrecovered read error */
2334 scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
2337 scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
2339 case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2340 scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2341 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2342 /* Unrecovered read error, auto-reallocation failed */
2343 scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
2345 /* Write error, auto-reallocation failed */
2346 scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
2348 case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2349 dev_dbg(&scmd->device->sdev_gendev,
2350 "Logical Drive Nonexistent or Offline");
2351 scmd->result = (DID_BAD_TARGET << 16);
2353 case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2354 dev_dbg(&scmd->device->sdev_gendev,
2355 "Attempt to Access Beyond End of Logical Drive");
2356 /* Logical block address out of range */
2357 scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0);
2359 case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2360 dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2361 scmd->result = (DID_BAD_TARGET << 16);
2364 scmd_printk(KERN_ERR, scmd,
2365 "Unexpected Error Status %04X", status);
2366 scmd->result = (DID_ERROR << 16);
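/*
 * myrb_handle_cmdblk - complete an internal (driver-issued) command
 *
 * Wakes up the caller waiting on the command block's completion.
 */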
2372 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2377 if (cmd_blk->completion) {
2378 complete(cmd_blk->completion);
2379 cmd_blk->completion = NULL;
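/*
 * myrb_monitor - periodic monitoring work
 *
 * Fetches new event log entries, the error table, rebuild and
 * consistency check progress, logical drive information or background
 * initialization status as flagged by the previous enquiry, issues a
 * fresh enquiry and re-arms itself.
 */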
2383 static void myrb_monitor(struct work_struct *work)
2385 struct myrb_hba *cb = container_of(work,
2386 struct myrb_hba, monitor_work.work);
2387 struct Scsi_Host *shost = cb->host;
2388 unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
2390 dev_dbg(&shost->shost_gendev, "monitor tick\n");
2392 if (cb->new_ev_seq > cb->old_ev_seq) {
2393 int event = cb->old_ev_seq;
2395 dev_dbg(&shost->shost_gendev,
2396 "get event log no %d/%d\n",
2397 cb->new_ev_seq, event);
2398 myrb_get_event(cb, event);
2399 cb->old_ev_seq = event + 1;
2401 } else if (cb->need_err_info) {
2402 cb->need_err_info = false;
2403 dev_dbg(&shost->shost_gendev, "get error table\n");
2404 myrb_get_errtable(cb);
2406 } else if (cb->need_rbld && cb->rbld_first) {
2407 cb->need_rbld = false;
2408 dev_dbg(&shost->shost_gendev,
2409 "get rebuild progress\n");
2410 myrb_update_rbld_progress(cb);
2412 } else if (cb->need_ldev_info) {
2413 cb->need_ldev_info = false;
2414 dev_dbg(&shost->shost_gendev,
2415 "get logical drive info\n");
2416 myrb_get_ldev_info(cb);
2418 } else if (cb->need_rbld) {
2419 cb->need_rbld = false;
2420 dev_dbg(&shost->shost_gendev,
2421 "get rebuild progress\n");
2422 myrb_update_rbld_progress(cb);
2424 } else if (cb->need_cc_status) {
2425 cb->need_cc_status = false;
2426 dev_dbg(&shost->shost_gendev,
2427 "get consistency check progress\n");
2428 myrb_get_cc_progress(cb);
2430 } else if (cb->need_bgi_status) {
2431 cb->need_bgi_status = false;
2432 dev_dbg(&shost->shost_gendev, "get background init status\n");
2433 myrb_bgi_control(cb);
2436 dev_dbg(&shost->shost_gendev, "new enquiry\n");
2437 mutex_lock(&cb->dma_mutex);
2438 myrb_hba_enquiry(cb);
2439 mutex_unlock(&cb->dma_mutex);
2440 if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2441 cb->need_err_info || cb->need_rbld ||
2442 cb->need_ldev_info || cb->need_cc_status ||
2443 cb->need_bgi_status) {
2444 dev_dbg(&shost->shost_gendev,
2445 "reschedule monitor\n");
2450 cb->primary_monitor_time = jiffies;
2451 queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
2455 * myrb_err_status - reports controller BIOS messages
2457 * Controller BIOS messages are passed through the Error Status Register
2458 * when the driver performs the BIOS handshaking.
2460 * Return: true for fatal errors and false otherwise.
2462 static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2463 unsigned char parm0, unsigned char parm1)
2465 struct pci_dev *pdev = cb->pdev;
2469 dev_info(&pdev->dev,
2470 "Physical Device %d:%d Not Responding\n",
2474 dev_notice(&pdev->dev, "Spinning Up Drives\n");
2477 dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2480 dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2483 dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2486 dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2490 dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2493 dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2496 dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2499 dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2502 dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2510 * Hardware-specific functions
2514 * DAC960 LA Series Controllers
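/*
 * The LA series is programmed through an inbound doorbell (IDB), an
 * outbound doorbell (ODB), an interrupt mask register and a set of
 * mailbox, status and error status registers; the inline helpers
 * below wrap those accesses.
 */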
2517 static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
2519 writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2522 static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
2524 writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
2527 static inline void DAC960_LA_reset_ctrl(void __iomem *base)
2529 writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
2532 static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
2534 writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2537 static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
2539 unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2541 return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
2544 static inline bool DAC960_LA_init_in_progress(void __iomem *base)
2546 unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2548 return !(idb & DAC960_LA_IDB_INIT_DONE);
2551 static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
2553 writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2556 static inline void DAC960_LA_ack_intr(void __iomem *base)
2558 writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
2559 base + DAC960_LA_ODB_OFFSET);
2562 static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
2564 unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2566 return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
2569 static inline void DAC960_LA_enable_intr(void __iomem *base)
2571 unsigned char odb = 0xFF;
2573 odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
2574 writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
2577 static inline void DAC960_LA_disable_intr(void __iomem *base)
2579 unsigned char odb = 0xFF;
2581 odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
2582 writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
2585 static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2586 union myrb_cmd_mbox *mbox)
2588 mem_mbox->words[1] = mbox->words[1];
2589 mem_mbox->words[2] = mbox->words[2];
2590 mem_mbox->words[3] = mbox->words[3];
2591 /* Memory barrier to prevent reordering */
2592 wmb();
2593 mem_mbox->words[0] = mbox->words[0];
2594 /* Memory barrier to force PCI access */
2595 mb();
2598 static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
2599 union myrb_cmd_mbox *mbox)
2601 writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
2602 writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
2603 writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
2604 writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
2607 static inline unsigned short DAC960_LA_read_status(void __iomem *base)
2609 return readw(base + DAC960_LA_STS_OFFSET);
2613 DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
2614 unsigned char *param0, unsigned char *param1)
2616 unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
2618 if (!(errsts & DAC960_LA_ERRSTS_PENDING))
2620 errsts &= ~DAC960_LA_ERRSTS_PENDING;
2623 *param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
2624 *param1 = readb(base + DAC960_LA_CMDID_OFFSET);
2625 writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
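/*
 * DAC960_LA_mbox_init - execute a command via the hardware mailbox
 *
 * Used during initialization, before the memory mailbox interface is
 * enabled: waits for the hardware mailbox to drain, writes the
 * command, rings the doorbell and polls for the completion status.
 */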
2629 static inline unsigned short
2630 DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2631 union myrb_cmd_mbox *mbox)
2633 unsigned short status;
2636 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2637 if (!DAC960_LA_hw_mbox_is_full(base))
2642 if (DAC960_LA_hw_mbox_is_full(base)) {
2644 "Timeout waiting for empty mailbox\n");
2645 return MYRB_STATUS_SUBSYS_TIMEOUT;
2647 DAC960_LA_write_hw_mbox(base, mbox);
2648 DAC960_LA_hw_mbox_new_cmd(base);
2650 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2651 if (DAC960_LA_hw_mbox_status_available(base))
2656 if (!DAC960_LA_hw_mbox_status_available(base)) {
2657 dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2658 return MYRB_STATUS_SUBSYS_TIMEOUT;
2660 status = DAC960_LA_read_status(base);
2661 DAC960_LA_ack_hw_mbox_intr(base);
2662 DAC960_LA_ack_hw_mbox_status(base);
2667 static int DAC960_LA_hw_init(struct pci_dev *pdev,
2668 struct myrb_hba *cb, void __iomem *base)
2671 unsigned char error, parm0, parm1;
2673 DAC960_LA_disable_intr(base);
2674 DAC960_LA_ack_hw_mbox_status(base);
2676 while (DAC960_LA_init_in_progress(base) &&
2677 timeout < MYRB_MAILBOX_TIMEOUT) {
2678 if (DAC960_LA_read_error_status(base, &error,
2680 myrb_err_status(cb, error, parm0, parm1))
2685 if (timeout == MYRB_MAILBOX_TIMEOUT) {
2687 "Timeout waiting for Controller Initialisation\n");
2690 if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2692 "Unable to Enable Memory Mailbox Interface\n");
2693 DAC960_LA_reset_ctrl(base);
2696 DAC960_LA_enable_intr(base);
2697 cb->qcmd = myrb_qcmd;
2698 cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
2699 if (cb->dual_mode_interface)
2700 cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2702 cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2703 cb->disable_intr = DAC960_LA_disable_intr;
2704 cb->reset = DAC960_LA_reset_ctrl;
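/*
 * DAC960_LA_intr_handler - LA series interrupt handler
 *
 * Walks the memory status mailbox ring, matches each completion to
 * the internal command block or SCSI command it belongs to and
 * completes it.
 */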
2709 static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
2711 struct myrb_hba *cb = arg;
2712 void __iomem *base = cb->io_base;
2713 struct myrb_stat_mbox *next_stat_mbox;
2714 unsigned long flags;
2716 spin_lock_irqsave(&cb->queue_lock, flags);
2717 DAC960_LA_ack_intr(base);
2718 next_stat_mbox = cb->next_stat_mbox;
2719 while (next_stat_mbox->valid) {
2720 unsigned char id = next_stat_mbox->id;
2721 struct scsi_cmnd *scmd = NULL;
2722 struct myrb_cmdblk *cmd_blk = NULL;
2724 if (id == MYRB_DCMD_TAG)
2725 cmd_blk = &cb->dcmd_blk;
2726 else if (id == MYRB_MCMD_TAG)
2727 cmd_blk = &cb->mcmd_blk;
2729 scmd = scsi_host_find_tag(cb->host, id - 3);
2731 cmd_blk = scsi_cmd_priv(scmd);
2734 cmd_blk->status = next_stat_mbox->status;
2736 dev_err(&cb->pdev->dev,
2737 "Unhandled command completion %d\n", id);
2739 memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2740 if (++next_stat_mbox > cb->last_stat_mbox)
2741 next_stat_mbox = cb->first_stat_mbox;
2745 myrb_handle_cmdblk(cb, cmd_blk);
2747 myrb_handle_scsi(cb, cmd_blk, scmd);
2750 cb->next_stat_mbox = next_stat_mbox;
2751 spin_unlock_irqrestore(&cb->queue_lock, flags);
2755 static struct myrb_privdata DAC960_LA_privdata = {
2756 .hw_init = DAC960_LA_hw_init,
2757 .irq_handler = DAC960_LA_intr_handler,
2758 .mmio_size = DAC960_LA_mmio_size,
2762 * DAC960 PG Series Controllers
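/*
 * The PG series follows the same doorbell protocol as the LA series,
 * but its doorbell and mask registers are 32 bits wide, hence the
 * readl()/writel() accessors below.
 */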
2764 static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
2766 writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2769 static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
2771 writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
2774 static inline void DAC960_PG_reset_ctrl(void __iomem *base)
2776 writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
2779 static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
2781 writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2784 static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
2786 unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2788 return idb & DAC960_PG_IDB_HWMBOX_FULL;
2791 static inline bool DAC960_PG_init_in_progress(void __iomem *base)
2793 unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2795 return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
2798 static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
2800 writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2803 static inline void DAC960_PG_ack_intr(void __iomem *base)
2805 writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
2806 base + DAC960_PG_ODB_OFFSET);
2809 static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
2811 unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2813 return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
2816 static inline void DAC960_PG_enable_intr(void __iomem *base)
2818 unsigned int imask = (unsigned int)-1;
2820 imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
2821 writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2824 static inline void DAC960_PG_disable_intr(void __iomem *base)
2826 unsigned int imask = (unsigned int)-1;
2828 writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2831 static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2832 union myrb_cmd_mbox *mbox)
2834 mem_mbox->words[1] = mbox->words[1];
2835 mem_mbox->words[2] = mbox->words[2];
2836 mem_mbox->words[3] = mbox->words[3];
2837 /* Memory barrier to prevent reordering */
2838 wmb();
2839 mem_mbox->words[0] = mbox->words[0];
2840 /* Memory barrier to force PCI access */
2841 mb();
2844 static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
2845 union myrb_cmd_mbox *mbox)
2847 writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
2848 writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
2849 writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
2850 writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
2853 static inline unsigned short
2854 DAC960_PG_read_status(void __iomem *base)
2856 return readw(base + DAC960_PG_STS_OFFSET);
2860 DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
2861 unsigned char *param0, unsigned char *param1)
2863 unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
2865 if (!(errsts & DAC960_PG_ERRSTS_PENDING))
2867 errsts &= ~DAC960_PG_ERRSTS_PENDING;
2869 *param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
2870 *param1 = readb(base + DAC960_PG_CMDID_OFFSET);
2871 writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
2875 static inline unsigned short
2876 DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2877 union myrb_cmd_mbox *mbox)
2879 unsigned short status;
2882 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2883 if (!DAC960_PG_hw_mbox_is_full(base))
2888 if (DAC960_PG_hw_mbox_is_full(base)) {
2890 "Timeout waiting for empty mailbox\n");
2891 return MYRB_STATUS_SUBSYS_TIMEOUT;
2893 DAC960_PG_write_hw_mbox(base, mbox);
2894 DAC960_PG_hw_mbox_new_cmd(base);
2897 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2898 if (DAC960_PG_hw_mbox_status_available(base))
2903 if (!DAC960_PG_hw_mbox_status_available(base)) {
2905 "Timeout waiting for mailbox status\n");
2906 return MYRB_STATUS_SUBSYS_TIMEOUT;
2908 status = DAC960_PG_read_status(base);
2909 DAC960_PG_ack_hw_mbox_intr(base);
2910 DAC960_PG_ack_hw_mbox_status(base);
2915 static int DAC960_PG_hw_init(struct pci_dev *pdev,
2916 struct myrb_hba *cb, void __iomem *base)
2919 unsigned char error, parm0, parm1;
2921 DAC960_PG_disable_intr(base);
2922 DAC960_PG_ack_hw_mbox_status(base);
2924 while (DAC960_PG_init_in_progress(base) &&
2925 timeout < MYRB_MAILBOX_TIMEOUT) {
2926 if (DAC960_PG_read_error_status(base, &error,
2928 myrb_err_status(cb, error, parm0, parm1))
2933 if (timeout == MYRB_MAILBOX_TIMEOUT) {
2935 "Timeout waiting for Controller Initialisation\n");
2938 if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
2940 "Unable to Enable Memory Mailbox Interface\n");
2941 DAC960_PG_reset_ctrl(base);
2944 DAC960_PG_enable_intr(base);
2945 cb->qcmd = myrb_qcmd;
2946 cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
2947 if (cb->dual_mode_interface)
2948 cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
2950 cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
2951 cb->disable_intr = DAC960_PG_disable_intr;
2952 cb->reset = DAC960_PG_reset_ctrl;
2957 static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
2959 struct myrb_hba *cb = arg;
2960 void __iomem *base = cb->io_base;
2961 struct myrb_stat_mbox *next_stat_mbox;
2962 unsigned long flags;
2964 spin_lock_irqsave(&cb->queue_lock, flags);
2965 DAC960_PG_ack_intr(base);
2966 next_stat_mbox = cb->next_stat_mbox;
2967 while (next_stat_mbox->valid) {
2968 unsigned char id = next_stat_mbox->id;
2969 struct scsi_cmnd *scmd = NULL;
2970 struct myrb_cmdblk *cmd_blk = NULL;
2972 if (id == MYRB_DCMD_TAG)
2973 cmd_blk = &cb->dcmd_blk;
2974 else if (id == MYRB_MCMD_TAG)
2975 cmd_blk = &cb->mcmd_blk;
2977 scmd = scsi_host_find_tag(cb->host, id - 3);
2979 cmd_blk = scsi_cmd_priv(scmd);
2982 cmd_blk->status = next_stat_mbox->status;
2984 dev_err(&cb->pdev->dev,
2985 "Unhandled command completion %d\n", id);
2987 memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2988 if (++next_stat_mbox > cb->last_stat_mbox)
2989 next_stat_mbox = cb->first_stat_mbox;
2992 myrb_handle_cmdblk(cb, cmd_blk);
2994 myrb_handle_scsi(cb, cmd_blk, scmd);
2996 cb->next_stat_mbox = next_stat_mbox;
2997 spin_unlock_irqrestore(&cb->queue_lock, flags);
3001 static struct myrb_privdata DAC960_PG_privdata = {
3002 .hw_init = DAC960_PG_hw_init,
3003 .irq_handler = DAC960_PG_intr_handler,
3004 .mmio_size = DAC960_PG_mmio_size,
3009 * DAC960 PD Series Controllers
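/*
 * The PD series has no memory mailbox interface: commands are written
 * directly into the hardware mailbox registers (see DAC960_PD_qcmd())
 * and completions are read back one at a time from the status
 * registers. These controllers also require an I/O port region.
 */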
3012 static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
3014 writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
3017 static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
3019 writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
3022 static inline void DAC960_PD_reset_ctrl(void __iomem *base)
3024 writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
3027 static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3029 unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3031 return idb & DAC960_PD_IDB_HWMBOX_FULL;
3034 static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3036 unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3038 return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3041 static inline void DAC960_PD_ack_intr(void __iomem *base)
3043 writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
3046 static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3048 unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3050 return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3053 static inline void DAC960_PD_enable_intr(void __iomem *base)
3055 writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
3058 static inline void DAC960_PD_disable_intr(void __iomem *base)
3060 writeb(0, base + DAC960_PD_IRQEN_OFFSET);
3063 static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
3064 union myrb_cmd_mbox *mbox)
3066 writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
3067 writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
3068 writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
3069 writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
3072 static inline unsigned char
3073 DAC960_PD_read_status_cmd_ident(void __iomem *base)
3075 return readb(base + DAC960_PD_STSID_OFFSET);
3078 static inline unsigned short
3079 DAC960_PD_read_status(void __iomem *base)
3081 return readw(base + DAC960_PD_STS_OFFSET);
3085 DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
3086 unsigned char *param0, unsigned char *param1)
3088 unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
3090 if (!(errsts & DAC960_PD_ERRSTS_PENDING))
3092 errsts &= ~DAC960_PD_ERRSTS_PENDING;
3094 *param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
3095 *param1 = readb(base + DAC960_PD_CMDID_OFFSET);
3096 writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
3100 static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3102 void __iomem *base = cb->io_base;
3103 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3105 while (DAC960_PD_hw_mbox_is_full(base))
3107 DAC960_PD_write_cmd_mbox(base, mbox);
3108 DAC960_PD_hw_mbox_new_cmd(base);
3111 static int DAC960_PD_hw_init(struct pci_dev *pdev,
3112 struct myrb_hba *cb, void __iomem *base)
3115 unsigned char error, parm0, parm1;
3117 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3118 dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3119 (unsigned long)cb->io_addr);
3122 DAC960_PD_disable_intr(base);
3123 DAC960_PD_ack_hw_mbox_status(base);
3125 while (DAC960_PD_init_in_progress(base) &&
3126 timeout < MYRB_MAILBOX_TIMEOUT) {
3127 if (DAC960_PD_read_error_status(base, &error,
3129 myrb_err_status(cb, error, parm0, parm1))
3134 if (timeout == MYRB_MAILBOX_TIMEOUT) {
3136 "Timeout waiting for Controller Initialisation\n");
3139 if (!myrb_enable_mmio(cb, NULL)) {
3141 "Unable to Enable Memory Mailbox Interface\n");
3142 DAC960_PD_reset_ctrl(base);
3145 DAC960_PD_enable_intr(base);
3146 cb->qcmd = DAC960_PD_qcmd;
3147 cb->disable_intr = DAC960_PD_disable_intr;
3148 cb->reset = DAC960_PD_reset_ctrl;
3153 static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
3155 struct myrb_hba *cb = arg;
3156 void __iomem *base = cb->io_base;
3157 unsigned long flags;
3159 spin_lock_irqsave(&cb->queue_lock, flags);
3160 while (DAC960_PD_hw_mbox_status_available(base)) {
3161 unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3162 struct scsi_cmnd *scmd = NULL;
3163 struct myrb_cmdblk *cmd_blk = NULL;
3165 if (id == MYRB_DCMD_TAG)
3166 cmd_blk = &cb->dcmd_blk;
3167 else if (id == MYRB_MCMD_TAG)
3168 cmd_blk = &cb->mcmd_blk;
3170 scmd = scsi_host_find_tag(cb->host, id - 3);
3172 cmd_blk = scsi_cmd_priv(scmd);
3175 cmd_blk->status = DAC960_PD_read_status(base);
3177 dev_err(&cb->pdev->dev,
3178 "Unhandled command completion %d\n", id);
3180 DAC960_PD_ack_intr(base);
3181 DAC960_PD_ack_hw_mbox_status(base);
3184 myrb_handle_cmdblk(cb, cmd_blk);
3186 myrb_handle_scsi(cb, cmd_blk, scmd);
3188 spin_unlock_irqrestore(&cb->queue_lock, flags);
3192 static struct myrb_privdata DAC960_PD_privdata = {
3193 .hw_init = DAC960_PD_hw_init,
3194 .irq_handler = DAC960_PD_intr_handler,
3195 .mmio_size = DAC960_PD_mmio_size,
3200 * DAC960 P Series Controllers
3202 * Similar to the DAC960 PD Series Controllers, but some commands have to be translated.
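/*
 * The helpers below shuffle fields between the old-style data and
 * mailbox layouts used by P series firmware and the layouts the rest
 * of the driver expects.
 */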
3206 static inline void myrb_translate_enquiry(void *enq)
3208 memcpy(enq + 132, enq + 36, 64);
3209 memset(enq + 36, 0, 96);
3212 static inline void myrb_translate_devstate(void *state)
3214 memcpy(state + 2, state + 3, 1);
3215 memmove(state + 4, state + 5, 2);
3216 memmove(state + 6, state + 8, 4);
3219 static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3221 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3222 int ldev_num = mbox->type5.ld.ldev_num;
3224 mbox->bytes[3] &= 0x7;
3225 mbox->bytes[3] |= mbox->bytes[7] << 6;
3226 mbox->bytes[7] = ldev_num;
3229 static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3231 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3232 int ldev_num = mbox->bytes[7];
3234 mbox->bytes[7] = mbox->bytes[3] >> 6;
3235 mbox->bytes[3] &= 0x7;
3236 mbox->bytes[3] |= ldev_num << 3;
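/*
 * DAC960_P_qcmd - queue a command on a P series controller
 *
 * Rewrites the mailbox to the old-style opcodes and read/write layout
 * expected by P series firmware before submitting it through the PD
 * hardware mailbox.
 */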
3239 static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3241 void __iomem *base = cb->io_base;
3242 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3244 switch (mbox->common.opcode) {
3245 case MYRB_CMD_ENQUIRY:
3246 mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
3248 case MYRB_CMD_GET_DEVICE_STATE:
3249 mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
3252 mbox->common.opcode = MYRB_CMD_READ_OLD;
3253 myrb_translate_to_rw_command(cmd_blk);
3255 case MYRB_CMD_WRITE:
3256 mbox->common.opcode = MYRB_CMD_WRITE_OLD;
3257 myrb_translate_to_rw_command(cmd_blk);
3259 case MYRB_CMD_READ_SG:
3260 mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
3261 myrb_translate_to_rw_command(cmd_blk);
3263 case MYRB_CMD_WRITE_SG:
3264 mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
3265 myrb_translate_to_rw_command(cmd_blk);
3270 while (DAC960_PD_hw_mbox_is_full(base))
3272 DAC960_PD_write_cmd_mbox(base, mbox);
3273 DAC960_PD_hw_mbox_new_cmd(base);
3277 static int DAC960_P_hw_init(struct pci_dev *pdev,
3278 struct myrb_hba *cb, void __iomem *base)
3281 unsigned char error, parm0, parm1;
3283 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3284 dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3285 (unsigned long)cb->io_addr);
3288 DAC960_PD_disable_intr(base);
3289 DAC960_PD_ack_hw_mbox_status(base);
3291 while (DAC960_PD_init_in_progress(base) &&
3292 timeout < MYRB_MAILBOX_TIMEOUT) {
3293 if (DAC960_PD_read_error_status(base, &error,
3295 myrb_err_status(cb, error, parm0, parm1))
3300 if (timeout == MYRB_MAILBOX_TIMEOUT) {
3302 "Timeout waiting for Controller Initialisation\n");
3305 if (!myrb_enable_mmio(cb, NULL)) {
3307 "Unable to allocate DMA mapped memory\n");
3308 DAC960_PD_reset_ctrl(base);
3311 DAC960_PD_enable_intr(base);
3312 cb->qcmd = DAC960_P_qcmd;
3313 cb->disable_intr = DAC960_PD_disable_intr;
3314 cb->reset = DAC960_PD_reset_ctrl;
3319 static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
3321 struct myrb_hba *cb = arg;
3322 void __iomem *base = cb->io_base;
3323 unsigned long flags;
3325 spin_lock_irqsave(&cb->queue_lock, flags);
3326 while (DAC960_PD_hw_mbox_status_available(base)) {
3327 unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3328 struct scsi_cmnd *scmd = NULL;
3329 struct myrb_cmdblk *cmd_blk = NULL;
3330 union myrb_cmd_mbox *mbox;
3331 enum myrb_cmd_opcode op;
3334 if (id == MYRB_DCMD_TAG)
3335 cmd_blk = &cb->dcmd_blk;
3336 else if (id == MYRB_MCMD_TAG)
3337 cmd_blk = &cb->mcmd_blk;
3339 scmd = scsi_host_find_tag(cb->host, id - 3);
3341 cmd_blk = scsi_cmd_priv(scmd);
3344 cmd_blk->status = DAC960_PD_read_status(base);
3346 dev_err(&cb->pdev->dev,
3347 "Unhandled command completion %d\n", id);
3349 DAC960_PD_ack_intr(base);
3350 DAC960_PD_ack_hw_mbox_status(base);
3355 mbox = &cmd_blk->mbox;
3356 op = mbox->common.opcode;
3358 case MYRB_CMD_ENQUIRY_OLD:
3359 mbox->common.opcode = MYRB_CMD_ENQUIRY;
3360 myrb_translate_enquiry(cb->enquiry);
3362 case MYRB_CMD_READ_OLD:
3363 mbox->common.opcode = MYRB_CMD_READ;
3364 myrb_translate_from_rw_command(cmd_blk);
3366 case MYRB_CMD_WRITE_OLD:
3367 mbox->common.opcode = MYRB_CMD_WRITE;
3368 myrb_translate_from_rw_command(cmd_blk);
3370 case MYRB_CMD_READ_SG_OLD:
3371 mbox->common.opcode = MYRB_CMD_READ_SG;
3372 myrb_translate_from_rw_command(cmd_blk);
3374 case MYRB_CMD_WRITE_SG_OLD:
3375 mbox->common.opcode = MYRB_CMD_WRITE_SG;
3376 myrb_translate_from_rw_command(cmd_blk);
3382 myrb_handle_cmdblk(cb, cmd_blk);
3384 myrb_handle_scsi(cb, cmd_blk, scmd);
3386 spin_unlock_irqrestore(&cb->queue_lock, flags);
3390 static struct myrb_privdata DAC960_P_privdata = {
3391 .hw_init = DAC960_P_hw_init,
3392 .irq_handler = DAC960_P_intr_handler,
3393 .mmio_size = DAC960_PD_mmio_size,
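/*
 * myrb_detect - allocate the SCSI host, map the controller registers
 * and run the model-specific hardware initialization for one PCI
 * device.
 */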
3396 static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
3397 const struct pci_device_id *entry)
3399 struct myrb_privdata *privdata =
3400 (struct myrb_privdata *)entry->driver_data;
3401 irq_handler_t irq_handler = privdata->irq_handler;
3402 unsigned int mmio_size = privdata->mmio_size;
3403 struct Scsi_Host *shost;
3404 struct myrb_hba *cb = NULL;
3406 shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
3408 dev_err(&pdev->dev, "Unable to allocate Controller\n");
3411 shost->max_cmd_len = 12;
3412 shost->max_lun = 256;
3413 cb = shost_priv(shost);
3414 mutex_init(&cb->dcmd_mutex);
3415 mutex_init(&cb->dma_mutex);
3419 if (pci_enable_device(pdev)) {
3420 dev_err(&pdev->dev, "Failed to enable PCI device\n");
3421 scsi_host_put(shost);
3425 if (privdata->hw_init == DAC960_PD_hw_init ||
3426 privdata->hw_init == DAC960_P_hw_init) {
3427 cb->io_addr = pci_resource_start(pdev, 0);
3428 cb->pci_addr = pci_resource_start(pdev, 1);
3430 cb->pci_addr = pci_resource_start(pdev, 0);
3432 pci_set_drvdata(pdev, cb);
3433 spin_lock_init(&cb->queue_lock);
3434 if (mmio_size < PAGE_SIZE)
3435 mmio_size = PAGE_SIZE;
3436 cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
3437 if (cb->mmio_base == NULL) {
3439 "Unable to map Controller Register Window\n");
3443 cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
3444 if (privdata->hw_init(pdev, cb, cb->io_base))
3447 if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
3449 "Unable to acquire IRQ Channel %d\n", pdev->irq);
3452 cb->irq = pdev->irq;
3457 "Failed to initialize Controller\n");
3462 static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3464 struct myrb_hba *cb;
3467 cb = myrb_detect(dev, entry);
3471 ret = myrb_get_hba_config(cb);
3477 if (!myrb_create_mempools(dev, cb)) {
3482 ret = scsi_add_host(cb->host, &dev->dev);
3484 dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3485 myrb_destroy_mempools(cb);
3488 scsi_scan_host(cb->host);
3496 static void myrb_remove(struct pci_dev *pdev)
3498 struct myrb_hba *cb = pci_get_drvdata(pdev);
3500 shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
3501 myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
3503 myrb_destroy_mempools(cb);
3507 static const struct pci_device_id myrb_id_table[] = {
3509 PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
3510 PCI_DEVICE_ID_DEC_21285,
3511 PCI_VENDOR_ID_MYLEX,
3512 PCI_DEVICE_ID_MYLEX_DAC960_LA),
3513 .driver_data = (unsigned long) &DAC960_LA_privdata,
3516 PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
3519 PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
3522 PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
3527 MODULE_DEVICE_TABLE(pci, myrb_id_table);
3529 static struct pci_driver myrb_pci_driver = {
3531 .id_table = myrb_id_table,
3532 .probe = myrb_probe,
3533 .remove = myrb_remove,
3536 static int __init myrb_init_module(void)
3540 myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3541 if (!myrb_raid_template)
3544 ret = pci_register_driver(&myrb_pci_driver);
3546 raid_class_release(myrb_raid_template);
3551 static void __exit myrb_cleanup_module(void)
3553 pci_unregister_driver(&myrb_pci_driver);
3554 raid_class_release(myrb_raid_template);
3557 module_init(myrb_init_module);
3558 module_exit(myrb_cleanup_module);
3560 MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
3561 MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3562 MODULE_LICENSE("GPL");