// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver,
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include "myrb.h"

static struct raid_template *myrb_raid_template;
static void myrb_monitor(struct work_struct *work);
static inline void myrb_translate_devstate(void *DeviceState);
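/*
 * Physical SCSI channels are numbered first; the logical drives are
 * presented on the channel directly after the last physical channel.
 * myrb_logical_channel() below derives that channel number from the
 * host's max_channel setting.
 */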
static inline int myrb_logical_channel(struct Scsi_Host *shost)
{
	return shost->max_channel - 1;
}
static struct myrb_devstate_name_entry {
	enum myrb_devstate state;
	const char *name;
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};
static const char *myrb_devstate_name(enum myrb_devstate state)
{
	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return "Unknown";
}
static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;
	const char *name;
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};
static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
{
	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}
/*
 * myrb_create_mempools - allocates auxiliary data structures
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
{
	size_t elem_size, elem_align;

	elem_align = sizeof(struct myrb_sge);
	elem_size = cb->host->sg_tablesize * elem_align;
	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
				      elem_size, elem_align, 0);
	if (cb->sg_pool == NULL) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate SG pool\n");
		return false;
	}

	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
					sizeof(struct myrb_dcdb),
					sizeof(unsigned int), 0);
	if (!cb->dcdb_pool) {
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate DCDB pool\n");
		return false;
	}

	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
		 "myrb_wq_%d", cb->host->host_no);
	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
	if (!cb->work_q) {
		dma_pool_destroy(cb->dcdb_pool);
		cb->dcdb_pool = NULL;
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to create workqueue\n");
		return false;
	}

	/*
	 * Initialize the Monitoring Timer.
	 */
	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);

	return true;
}
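/*
 * The monitor work is queued for the first time one jiffy after the
 * pools are created; myrb_monitor() is then expected to re-arm itself
 * at its regular monitoring interval.
 */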
/*
 * myrb_destroy_mempools - tears down the memory pools for the controller
 */
static void myrb_destroy_mempools(struct myrb_hba *cb)
{
	cancel_delayed_work_sync(&cb->monitor_work);
	destroy_workqueue(cb->work_q);

	dma_pool_destroy(cb->sg_pool);
	dma_pool_destroy(cb->dcdb_pool);
}
/*
 * myrb_reset_cmd - reset command block
 */
static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
	cmd_blk->status = 0;
}
/*
 * myrb_qcmd - queues command block for execution
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	cb->write_cmd_mbox(next_mbox, mbox);
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}
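/*
 * Command submission runs over a ring of memory mailboxes: myrb_qcmd()
 * copies the new command into the next free slot and only signals the
 * hardware (via get_cmd_mbox) when one of the two most recently used
 * slots has already been consumed by the controller, i.e. its first
 * word was cleared; otherwise, as far as the code shows, the
 * controller is still scanning the ring and picks the command up by
 * itself. The ring pointer then advances with wraparound.
 */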
/*
 * myrb_exec_cmd - executes command block and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
		struct myrb_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	unsigned long flags;

	cmd_blk->completion = &cmpl;
	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	wait_for_completion(&cmpl);
	return cmd_blk->status;
}
/*
 * myrb_exec_type3 - executes a type 3 command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, dma_addr_t addr)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_DCMD_TAG;
	mbox->type3.opcode = op;
	mbox->type3.addr = addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);

	return status;
}
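/*
 * All driver-initiated ("direct") commands share the single dcmd_blk,
 * so dcmd_mutex serializes them: at most one direct command is in
 * flight at any time.
 */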
/*
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}
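/*
 * Note: when the older MYRB_CMD_GET_DEVICE_STATE_OLD opcode was used,
 * the returned buffer is in the original DAC960 layout and is
 * converted in place by myrb_translate_devstate() above.
 */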
static char *myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};
/*
 * myrb_get_event - get event log from HBA
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Executes a type 3E command and logs the event message
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);
	else if (ev_buf->seq_num == event) {
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}
/*
 * myrb_get_errtable - retrieves the error table from the controller
 *
 * Executes a type 3 command and logs the error table from the controller.
 */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
				+ sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}
/*
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 *
 * Executes a type 3 command and updates the logical device table.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		memcpy(old, new, sizeof(*new));
		scsi_device_put(sdev);
	}
	return status;
}
/*
 * myrb_get_rbld_progress - get rebuild progress information
 *
 * Executes a type 3 command and returns the rebuild progress
 * information.
 *
 * Return: command status
 */
static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
		struct myrb_rbld_progress *rbld)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf)
		return MYRB_STATUS_RBLD_NOT_CHECKED;

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (rbld)
		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
	return status;
}
/*
 * myrb_update_rbld_progress - updates the rebuild status
 *
 * Updates the rebuild status for the attached logical devices.
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		if (!sdev)
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	cb->last_rbld_status = status;
}
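/*
 * A note on the percentage arithmetic above (and in the progress
 * reports below): both block counts are shifted right by 7 before the
 * multiplication by 100, presumably so that the intermediate product
 * cannot overflow 32 bits on very large logical drives.
 */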
/*
 * myrb_get_cc_progress - retrieve the consistency check progress
 *
 * Execute a type 3 Command and fetch the rebuild / consistency check
 * status.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}
/*
 * myrb_bgi_control - updates background initialisation status
 *
 * Executes a type 3B command and updates the background initialisation status
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization in Progress: %d%% completed\n",
				    (100 * (bgi->blocks_done >> 7))
				    / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		fallthrough;
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}
/*
 * myrb_hba_enquiry - updates the controller status
 *
 * Executes a DAC_V1_Enquiry command and updates the controller status.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}
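/*
 * myrb_hba_enquiry() is effectively the heartbeat of the monitoring
 * loop: it diffs the fresh enquiry data against the previous snapshot
 * and sets the need_* flags (need_err_info, need_ldev_info, need_rbld,
 * need_cc_status, need_bgi_status) that the monitor worker acts upon.
 */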
/*
 * myrb_set_pdev_state - sets the device state for a physical device
 *
 * Return: command status
 */
static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
		struct scsi_device *sdev, enum myrb_devstate state)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.state = state & 0x1F;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);

	return status;
}
/*
 * myrb_enable_mmio - enables the Memory Mailbox Interface
 *
 * PD and P controller types have no memory mailbox, but still need the
 * other dma mapped memory.
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
{
	void __iomem *base = cb->io_base;
	struct pci_dev *pdev = cb->pdev;
	size_t err_table_size;
	size_t ldev_info_size;
	union myrb_cmd_mbox *cmd_mbox_mem;
	struct myrb_stat_mbox *stat_mbox_mem;
	union myrb_cmd_mbox mbox;
	unsigned short status;

	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "DMA mask out of range\n");
		return false;
	}

	cb->enquiry = dma_alloc_coherent(&pdev->dev,
					 sizeof(struct myrb_enquiry),
					 &cb->enquiry_addr, GFP_KERNEL);
	if (!cb->enquiry)
		return false;

	err_table_size = sizeof(struct myrb_error_entry) *
		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
					   &cb->err_table_addr, GFP_KERNEL);
	if (!cb->err_table)
		return false;

	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
					       &cb->ldev_info_addr, GFP_KERNEL);
	if (!cb->ldev_info_buf)
		return false;

	/*
	 * Skip mailbox initialisation for PD and P Controllers
	 */
	if (!mmio_init_fn)
		return true;

	/* These are the base addresses for the command memory mailbox array */
	cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
						cb->cmd_mbox_size,
						&cb->cmd_mbox_addr,
						GFP_KERNEL);
	if (!cb->first_cmd_mbox)
		return false;

	cmd_mbox_mem = cb->first_cmd_mbox;
	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
	cb->last_cmd_mbox = cmd_mbox_mem;
	cb->next_cmd_mbox = cb->first_cmd_mbox;
	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
			     sizeof(struct myrb_stat_mbox);
	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
						 cb->stat_mbox_size,
						 &cb->stat_mbox_addr,
						 GFP_KERNEL);
	if (!cb->first_stat_mbox)
		return false;

	stat_mbox_mem = cb->first_stat_mbox;
	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
	cb->last_stat_mbox = stat_mbox_mem;
	cb->next_stat_mbox = cb->first_stat_mbox;

	/* Enable the Memory Mailbox Interface. */
	cb->dual_mode_interface = true;
	mbox.typeX.opcode = 0x2B;
	mbox.typeX.id = 0;
	mbox.typeX.opcode2 = 0x14;
	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;

	status = mmio_init_fn(pdev, base, &mbox);
	if (status != MYRB_STATUS_SUCCESS) {
		cb->dual_mode_interface = false;
		mbox.typeX.opcode2 = 0x10;
		status = mmio_init_fn(pdev, base, &mbox);
		if (status != MYRB_STATUS_SUCCESS) {
			dev_err(&pdev->dev,
				"Failed to enable mailbox, status %02X\n",
				status);
			return false;
		}
	}
	return true;
}
/*
 * myrb_get_hba_config - reads the configuration information
 *
 * Reads the configuration information from the controller and
 * initializes the controller structure.
 *
 * Return: 0 on success, errno otherwise
 */
static int myrb_get_hba_config(struct myrb_hba *cb)
{
	struct myrb_enquiry2 *enquiry2;
	dma_addr_t enquiry2_addr;
	struct myrb_config2 *config2;
	dma_addr_t config2_addr;
	struct Scsi_Host *shost = cb->host;
	struct pci_dev *pdev = cb->pdev;
	int pchan_max = 0, pchan_cur = 0;
	unsigned short status;
	int ret = -ENODEV, memsize = 0;

	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				      &enquiry2_addr, GFP_KERNEL);
	if (!enquiry2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 enquiry2 memory\n");
		return -ENOMEM;
	}
	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
				     &config2_addr, GFP_KERNEL);
	if (!config2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 config2 memory\n");
		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				  enquiry2, enquiry2_addr);
		return -ENOMEM;
	}
	mutex_lock(&cb->dma_mutex);
	status = myrb_hba_enquiry(cb);
	mutex_unlock(&cb->dma_mutex);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry2\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue ReadConfig2\n");
		goto out_free;
	}

	status = myrb_get_ldev_info(cb);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to get logical drive information\n");
		goto out_free;
	}
	/*
	 * Initialize the Controller Model Name and Full Model Name fields.
	 */
	switch (enquiry2->hw.sub_model) {
	case DAC960_V1_P_PD_PU:
		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
			strcpy(cb->model_name, "DAC960PU");
		else
			strcpy(cb->model_name, "DAC960PD");
		break;
	case DAC960_V1_PL:
		strcpy(cb->model_name, "DAC960PL");
		break;
	case DAC960_V1_PG:
		strcpy(cb->model_name, "DAC960PG");
		break;
	case DAC960_V1_PJ:
		strcpy(cb->model_name, "DAC960PJ");
		break;
	case DAC960_V1_PR:
		strcpy(cb->model_name, "DAC960PR");
		break;
	case DAC960_V1_PT:
		strcpy(cb->model_name, "DAC960PT");
		break;
	case DAC960_V1_PTL0:
		strcpy(cb->model_name, "DAC960PTL0");
		break;
	case DAC960_V1_PRL:
		strcpy(cb->model_name, "DAC960PRL");
		break;
	case DAC960_V1_PTL1:
		strcpy(cb->model_name, "DAC960PTL1");
		break;
	case DAC960_V1_1164P:
		strcpy(cb->model_name, "eXtremeRAID 1100");
		break;
	default:
		shost_printk(KERN_WARNING, cb->host,
			     "Unknown Model %X\n",
			     enquiry2->hw.sub_model);
		goto out_free;
	}
	/*
	 * Initialize the Controller Firmware Version field and verify that it
	 * is a supported firmware version.
	 * The supported firmware versions are:
	 *
	 * DAC1164P		    5.06 and above
	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
	 * DAC960PU/PD/PL	    3.51 and above
	 * DAC960PU/PD/PL/P	    2.73 and above
	 */
#if defined(CONFIG_ALPHA)
	/*
	 * DEC Alpha machines were often equipped with DAC960 cards that were
	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
	 * the last custom FW revision to be released by DEC for these older
	 * controllers, appears to work quite well with this driver.
	 *
	 * Cards tested successfully were several versions each of the PD and
	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
	 * back of the board, of:
	 *
	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
	 *         or D040349 (3-channel)
	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
	 *         or D040397 (3-channel)
	 */
# define FIRMWARE_27X	"2.70"
#else
# define FIRMWARE_27X	"2.73"
#endif

	if (enquiry2->fw.major_version == 0) {
		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
		enquiry2->fw.firmware_type = '0';
		enquiry2->fw.turn_id = 0;
	}
	snprintf(cb->fw_version, sizeof(cb->fw_version),
		 "%u.%02u-%c-%02u",
		 enquiry2->fw.major_version,
		 enquiry2->fw.minor_version,
		 enquiry2->fw.firmware_type,
		 enquiry2->fw.turn_id);
	if (!((enquiry2->fw.major_version == 5 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 4 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 3 &&
	       enquiry2->fw.minor_version >= 51) ||
	      (enquiry2->fw.major_version == 2 &&
	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
		shost_printk(KERN_WARNING, cb->host,
			     "Firmware Version '%s' unsupported\n",
			     cb->fw_version);
		goto out_free;
	}
	/*
	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
	 * Enclosure Management Enabled fields.
	 */
	switch (enquiry2->hw.model) {
	case MYRB_5_CHANNEL_BOARD:
		pchan_max = 5;
		break;
	case MYRB_3_CHANNEL_BOARD:
	case MYRB_3_CHANNEL_ASIC_DAC:
		pchan_max = 3;
		break;
	case MYRB_2_CHANNEL_BOARD:
		pchan_max = 2;
		break;
	default:
		pchan_max = enquiry2->cfg_chan;
		break;
	}
	pchan_cur = enquiry2->cur_chan;
	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
		cb->bus_width = 32;
	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
		cb->bus_width = 16;
	else
		cb->bus_width = 8;
	cb->ldev_block_size = enquiry2->ldev_block_size;
	shost->max_channel = pchan_cur;
	shost->max_id = enquiry2->max_targets;
	memsize = enquiry2->mem_size >> 20;
	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most one less than the
	 * Controller Queue Depth to allow for an automatic drive
	 * rebuild operation.
	 */
	shost->can_queue = cb->enquiry->max_tcq;
	if (shost->can_queue < 3)
		shost->can_queue = enquiry2->max_cmds;
	if (shost->can_queue < 3)
		/* Play safe and disable TCQ */
		shost->can_queue = 1;

	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
	shost->max_sectors = enquiry2->max_sectors;
	shost->sg_tablesize = enquiry2->max_sge;
	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
	/*
	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
	 */
	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	/* Assume 255/63 translation */
	cb->ldev_geom_heads = 255;
	cb->ldev_geom_sectors = 63;
	if (config2->drive_geometry) {
		cb->ldev_geom_heads = 128;
		cb->ldev_geom_sectors = 32;
	}
	/*
	 * Initialize the Background Initialization Status.
	 */
	if ((cb->fw_version[0] == '4' &&
	     strcmp(cb->fw_version, "4.08") >= 0) ||
	    (cb->fw_version[0] == '5' &&
	     strcmp(cb->fw_version, "5.08") >= 0)) {
		cb->bgi_status_supported = true;
		myrb_bgi_control(cb);
	}
	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
	ret = 0;
	shost_printk(KERN_INFO, cb->host,
		     "Configuring %s PCI RAID Controller\n", cb->model_name);
	shost_printk(KERN_INFO, cb->host,
		     "  Firmware Version: %s, Memory Size: %dMB\n",
		     cb->fw_version, memsize);
	if (cb->io_addr == 0)
		shost_printk(KERN_INFO, cb->host,
			     "  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
			     (unsigned long)cb->pci_addr, cb->irq);
	else
		shost_printk(KERN_INFO, cb->host,
			     "  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
			     (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
			     cb->irq);
	shost_printk(KERN_INFO, cb->host,
		     "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		     cb->host->can_queue, cb->host->max_sectors);
	shost_printk(KERN_INFO, cb->host,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     cb->host->can_queue, cb->host->sg_tablesize,
		     MYRB_SCATTER_GATHER_LIMIT);
	shost_printk(KERN_INFO, cb->host,
		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
		     cb->stripe_size, cb->segment_size,
		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
		     cb->safte_enabled ?
		     "  SAF-TE Enclosure Management Enabled" : "");
	shost_printk(KERN_INFO, cb->host,
		     "  Physical: %d/%d channels %d/%d/%d devices\n",
		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
		     cb->host->max_id);

	shost_printk(KERN_INFO, cb->host,
		     "  Logical: 1/1 channels, %d/%d disks\n",
		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
			  enquiry2, enquiry2_addr);
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
			  config2, config2_addr);

	return ret;
}
/*
 * myrb_unmap - unmaps controller structures
 */
static void myrb_unmap(struct myrb_hba *cb)
{
	if (cb->ldev_info_buf) {
		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
			MYRB_MAX_LDEVS;
		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
				  cb->ldev_info_buf, cb->ldev_info_addr);
		cb->ldev_info_buf = NULL;
	}
	if (cb->err_table) {
		size_t err_table_size = sizeof(struct myrb_error_entry) *
			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
		dma_free_coherent(&cb->pdev->dev, err_table_size,
				  cb->err_table, cb->err_table_addr);
		cb->err_table = NULL;
	}
	if (cb->enquiry) {
		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
				  cb->enquiry, cb->enquiry_addr);
		cb->enquiry = NULL;
	}
	if (cb->first_stat_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
				  cb->first_stat_mbox, cb->stat_mbox_addr);
		cb->first_stat_mbox = NULL;
	}
	if (cb->first_cmd_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
		cb->first_cmd_mbox = NULL;
	}
}
/*
 * myrb_cleanup - cleanup controller structures
 */
static void myrb_cleanup(struct myrb_hba *cb)
{
	struct pci_dev *pdev = cb->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrb_unmap(cb);

	if (cb->mmio_base) {
		if (cb->disable_intr)
			cb->disable_intr(cb->io_base);
		iounmap(cb->mmio_base);
	}
	if (cb->irq)
		free_irq(cb->irq, cb);
	if (cb->io_addr)
		release_region(cb->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	scsi_host_put(cb->host);
}
static int myrb_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrb_hba *cb = shost_priv(shost);

	cb->reset(cb->io_base);
	return SUCCESS;
}
static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_dcdb *dcdb;
	dma_addr_t dcdb_addr;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	int nsge;

	myrb_reset_cmd(cmd_blk);
	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
	if (!dcdb)
		return SCSI_MLQUEUE_HOST_BUSY;
	nsge = scsi_dma_map(scmd);
	if (nsge > 1) {
		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
		scmd->result = (DID_ERROR << 16);
		scmd->scsi_done(scmd);
		return 0;
	}

	mbox->type3.opcode = MYRB_CMD_DCDB;
	mbox->type3.id = rq->tag + 3;
	mbox->type3.addr = dcdb_addr;
	dcdb->channel = sdev->channel;
	dcdb->target = sdev->id;
	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
		break;
	case DMA_TO_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
		break;
	case DMA_FROM_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
		break;
	default:
		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
		break;
	}
	dcdb->early_status = false;
	if (rq->timeout <= 10)
		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
	else if (rq->timeout <= 60)
		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
	else if (rq->timeout <= 600)
		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
	else
		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
	dcdb->no_autosense = false;
	dcdb->allow_disconnect = true;
	sgl = scsi_sglist(scmd);
	dcdb->dma_addr = sg_dma_address(sgl);
	if (sg_dma_len(sgl) > USHRT_MAX) {
		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
	} else {
		dcdb->xfer_len_lo = sg_dma_len(sgl);
		dcdb->xfer_len_hi4 = 0;
	}
	dcdb->cdb_len = scmd->cmd_len;
	dcdb->sense_len = sizeof(dcdb->sense);
	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return 0;
}
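/*
 * A DCDB pass-through command carries exactly one data address/length
 * pair, which is why the nsge > 1 check above fails multi-segment
 * requests instead of building a scatter/gather list.
 */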
static void myrb_inquiry(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	unsigned char inq[36] = {
		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20,
	};

	if (cb->bus_width > 16)
		inq[7] |= 1 << 6;
	if (cb->bus_width > 8)
		inq[7] |= 1 << 5;
	memcpy(&inq[16], cb->model_name, 16);
	memcpy(&inq[32], cb->fw_version, 1);
	memcpy(&inq[33], &cb->fw_version[2], 2);
	memcpy(&inq[35], &cb->fw_version[7], 1);

	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
}
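/*
 * The INQUIRY response above is synthesized rather than fetched from
 * the hardware: bytes 8-15 hold the fixed vendor string "MYLEX   ",
 * the model name is patched into bytes 16-31 and selected characters
 * of the firmware version string into bytes 32-35, while byte 7
 * advertises 16- or 32-bit wide transfers depending on the detected
 * bus width.
 */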
static void
myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->size, &block_desc[0]);
		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
	}
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	if (ldev_info->wb_enabled)
		mode_pg[2] |= 0x04;
	if (cb->segment_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}
static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}
static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char data[8];

	dev_dbg(&scmd->device->sdev_gendev,
		"Capacity %u, blocksize %u\n",
		ldev_info->size, cb->ldev_block_size);
	put_unaligned_be32(ldev_info->size - 1, &data[0]);
	put_unaligned_be32(cb->ldev_block_size, &data[4]);
	scsi_sg_copy_from_buffer(scmd, data, 8);
}
static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_ldev_info *ldev_info;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	u64 lba;
	u32 block_cnt;
	int nsge;

	ldev_info = sdev->hostdata;
	if (ldev_info->state != MYRB_DEVICE_ONLINE &&
	    ldev_info->state != MYRB_DEVICE_WO) {
		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
			sdev->id, ldev_info ? ldev_info->state : 0xff);
		scmd->result = (DID_BAD_TARGET << 16);
		scmd->scsi_done(scmd);
		return 0;
	}
	switch (scmd->cmnd[0]) {
	case TEST_UNIT_READY:
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case INQUIRY:
		if (scmd->cmnd[1] & 1) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			myrb_inquiry(cb, scmd);
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case SYNCHRONIZE_CACHE:
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case MODE_SENSE:
		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
		    (scmd->cmnd[2] & 0x3F) != 0x08) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			myrb_mode_sense(cb, scmd, ldev_info);
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case READ_CAPACITY:
		if ((scmd->cmnd[1] & 1) ||
		    (scmd->cmnd[8] & 1)) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
			scmd->scsi_done(scmd);
			return 0;
		}
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		if (lba) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
			scmd->scsi_done(scmd);
			return 0;
		}
		myrb_read_capacity(cb, scmd, ldev_info);
		scmd->scsi_done(scmd);
		return 0;
	case REQUEST_SENSE:
		myrb_request_sense(cb, scmd);
		scmd->result = (DID_OK << 16);
		return 0;
	case SEND_DIAGNOSTIC:
		if (scmd->cmnd[1] != 0x04) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			/* Assume good status */
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case READ_6:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scmd->scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_6:
		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
		       (scmd->cmnd[2] << 8) |
		       scmd->cmnd[3]);
		block_cnt = scmd->cmnd[4];
		break;
	case READ_10:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scmd->scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_10:
	case VERIFY:		/* 0x2F */
	case WRITE_VERIFY:	/* 0x2E */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case READ_12:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scmd->scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_12:
	case VERIFY_12:		/* 0xAF */
	case WRITE_VERIFY_12:	/* 0xAE */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	default:
		/* Illegal request, invalid opcode */
		scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
		scmd->scsi_done(scmd);
		return 0;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
	if (scmd->sc_data_direction == DMA_NONE)
		goto submit;
	nsge = scsi_dma_map(scmd);
	if (nsge == 1) {
		sgl = scsi_sglist(scmd);
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = (u32)sg_dma_address(sgl);
	} else {
		struct myrb_sge *hw_sgl;
		dma_addr_t hw_sgl_addr;
		int i;

		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
		if (!hw_sgl)
			return SCSI_MLQUEUE_HOST_BUSY;

		cmd_blk->sgl = hw_sgl;
		cmd_blk->sgl_addr = hw_sgl_addr;

		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ_SG;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE_SG;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = hw_sgl_addr;
		mbox->type5.sg_count = nsge;

		scsi_for_each_sg(scmd, sgl, nsge, i) {
			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
			hw_sgl++;
		}
	}
submit:
	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	return 0;
}
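/*
 * Read/write CDBs that fall through the switch above are translated
 * into DAC960 type5 commands: single-segment transfers use the direct
 * MYRB_CMD_READ/MYRB_CMD_WRITE form with the DMA address in the
 * mailbox, while multi-segment transfers allocate a hardware
 * scatter/gather list from sg_pool and use the _SG opcode variants.
 */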
static int myrb_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;

	if (sdev->channel > myrb_logical_channel(shost)) {
		scmd->result = (DID_BAD_TARGET << 16);
		scmd->scsi_done(scmd);
		return 0;
	}
	if (sdev->channel == myrb_logical_channel(shost))
		return myrb_ldev_queuecommand(shost, scmd);

	return myrb_pthru_queuecommand(shost, scmd);
}
static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_ldev_info *ldev_info;
	unsigned short ldev_num = sdev->id;
	enum raid_level level;

	ldev_info = cb->ldev_info_buf + ldev_num;
	if (!ldev_info)
		return -ENXIO;

	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
	if (!sdev->hostdata)
		return -ENOMEM;
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc ldev %d state %x\n",
		ldev_num, ldev_info->state);
	memcpy(sdev->hostdata, ldev_info,
	       sizeof(*ldev_info));
	switch (ldev_info->raid_level) {
	case MYRB_RAID_LEVEL0:
		level = RAID_LEVEL_LINEAR;
		break;
	case MYRB_RAID_LEVEL1:
		level = RAID_LEVEL_1;
		break;
	case MYRB_RAID_LEVEL3:
		level = RAID_LEVEL_3;
		break;
	case MYRB_RAID_LEVEL5:
		level = RAID_LEVEL_5;
		break;
	case MYRB_RAID_LEVEL6:
		level = RAID_LEVEL_6;
		break;
	case MYRB_RAID_JBOD:
		level = RAID_LEVEL_JBOD;
		break;
	default:
		level = RAID_LEVEL_UNKNOWN;
		break;
	}
	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
	return 0;
}
static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	unsigned short status;

	if (sdev->id > MYRB_MAX_TARGETS)
		return -ENXIO;

	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
	if (!pdev_info)
		return -ENOMEM;

	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
				  sdev, pdev_info);
	if (status != MYRB_STATUS_SUCCESS) {
		dev_dbg(&sdev->sdev_gendev,
			"Failed to get device state, status %x\n",
			status);
		kfree(pdev_info);
		return -ENXIO;
	}
	if (!pdev_info->present) {
		dev_dbg(&sdev->sdev_gendev,
			"device not present, skip\n");
		kfree(pdev_info);
		return -ENXIO;
	}
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc pdev %d:%d state %x\n",
		sdev->channel, sdev->id, pdev_info->state);
	sdev->hostdata = pdev_info;

	return 0;
}
static int myrb_slave_alloc(struct scsi_device *sdev)
{
	if (sdev->channel > myrb_logical_channel(sdev->host))
		return -ENXIO;

	if (sdev->lun > 0)
		return -ENXIO;

	if (sdev->channel == myrb_logical_channel(sdev->host))
		return myrb_ldev_slave_alloc(sdev);

	return myrb_pdev_slave_alloc(sdev);
}
static int myrb_slave_configure(struct scsi_device *sdev)
{
	struct myrb_ldev_info *ldev_info;

	if (sdev->channel > myrb_logical_channel(sdev->host))
		return -ENXIO;

	if (sdev->channel < myrb_logical_channel(sdev->host)) {
		sdev->no_uld_attach = 1;
		return 0;
	}
	if (sdev->lun != 0)
		return -ENXIO;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	if (ldev_info->state != MYRB_DEVICE_ONLINE)
		sdev_printk(KERN_INFO, sdev,
			    "Logical drive is %s\n",
			    myrb_devstate_name(ldev_info->state));

	sdev->tagged_supported = 1;
	return 0;
}

static void myrb_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
}
static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		sector_t capacity, int geom[])
{
	struct myrb_hba *cb = shost_priv(sdev->host);

	geom[0] = cb->ldev_geom_heads;
	geom[1] = cb->ldev_geom_sectors;
	geom[2] = sector_div(capacity, geom[0] * geom[1]);

	return 0;
}
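/*
 * The geometry reported to the BIOS layer matches what
 * myrb_get_hba_config() selected: 255 heads/63 sectors by default, or
 * 128/32 when config2 requested the alternate drive geometry; geom[2]
 * receives the resulting cylinder count.
 */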
static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	int ret;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel == myrb_logical_channel(sdev->host)) {
		struct myrb_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		name = myrb_devstate_name(ldev_info->state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       ldev_info->state);
	} else {
		struct myrb_pdev_state *pdev_info = sdev->hostdata;
		unsigned short status;
		const char *name;

		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
					  sdev, pdev_info);
		if (status != MYRB_STATUS_SUCCESS)
			sdev_printk(KERN_INFO, sdev,
				    "Failed to get device state, status %x\n",
				    status);

		if (!pdev_info->present)
			name = "not present";
		else
			name = myrb_devstate_name(pdev_info->state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       pdev_info->state);
	}
	return ret;
}
static ssize_t raid_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	enum myrb_devstate new_state;
	unsigned short status;

	if (!strncmp(buf, "kill", 4) ||
	    !strncmp(buf, "offline", 7))
		new_state = MYRB_DEVICE_DEAD;
	else if (!strncmp(buf, "online", 6))
		new_state = MYRB_DEVICE_ONLINE;
	else if (!strncmp(buf, "standby", 7))
		new_state = MYRB_DEVICE_STANDBY;
	else
		return -EINVAL;

	pdev_info = sdev->hostdata;
	if (!pdev_info) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - no physical device information\n");
		return -ENXIO;
	}
	if (!pdev_info->present) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - device not present\n");
		return -ENXIO;
	}

	if (pdev_info->state == new_state)
		return count;

	status = myrb_set_pdev_state(cb, sdev, new_state);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		break;
	case MYRB_STATUS_START_DEVICE_FAILED:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Unable to Start Device\n");
		count = -EAGAIN;
		break;
	case MYRB_STATUS_NO_DEVICE:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - No Device at Address\n");
		count = -ENODEV;
		break;
	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Invalid Channel or Target or Modifier\n");
		count = -EINVAL;
		break;
	case MYRB_STATUS_CHANNEL_BUSY:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Channel Busy\n");
		count = -EBUSY;
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Unexpected Status %04X\n", status);
		count = -EIO;
		break;
	}
	return count;
}
static DEVICE_ATTR_RW(raid_state);
static ssize_t raid_level_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (sdev->channel == myrb_logical_channel(sdev->host)) {
		struct myrb_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		if (!ldev_info)
			return -ENXIO;

		name = myrb_raidlevel_name(ldev_info->raid_level);
		if (!name)
			return snprintf(buf, 32, "Invalid (%02X)\n",
					ldev_info->raid_level);
		return snprintf(buf, 32, "%s\n", name);
	}
	return snprintf(buf, 32, "Physical Drive\n");
}
static DEVICE_ATTR_RO(raid_level);
static ssize_t rebuild_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_rbld_progress rbld_buf;
	unsigned char status;

	if (sdev->channel < myrb_logical_channel(sdev->host))
		return snprintf(buf, 32, "physical device - not rebuilding\n");

	status = myrb_get_rbld_progress(cb, &rbld_buf);

	if (rbld_buf.ldev_num != sdev->id ||
	    status != MYRB_STATUS_SUCCESS)
		return snprintf(buf, 32, "not rebuilding\n");

	return snprintf(buf, 32, "rebuilding block %u of %u\n",
			rbld_buf.ldev_size - rbld_buf.blocks_left,
			rbld_buf.ldev_size);
}
static ssize_t rebuild_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_cmdblk *cmd_blk;
	union myrb_cmd_mbox *mbox;
	unsigned short status;
	int rc, start;
	const char *msg;

	rc = kstrtoint(buf, 0, &start);
	if (rc)
		return rc;

	if (sdev->channel >= myrb_logical_channel(sdev->host))
		return -ENXIO;

	status = myrb_get_rbld_progress(cb, NULL);
	if (start) {
		if (status == MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Initiated; already in progress\n");
			return -EALREADY;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
		mbox->type3D.id = MYRB_DCMD_TAG;
		mbox->type3D.channel = sdev->channel;
		mbox->type3D.target = sdev->id;
		status = myrb_exec_cmd(cb, cmd_blk);
		mutex_unlock(&cb->dcmd_mutex);
	} else {
		struct pci_dev *pdev = cb->pdev;
		unsigned char *rate;
		dma_addr_t rate_addr;

		if (status != MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Cancelled; not in progress\n");
			return 0;
		}

		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
					  &rate_addr, GFP_KERNEL);
		if (rate == NULL) {
			sdev_printk(KERN_INFO, sdev,
				    "Cancellation of Rebuild Failed - Out of Memory\n");
			return -ENOMEM;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
		mbox->type3R.id = MYRB_DCMD_TAG;
		mbox->type3R.rbld_rate = 0xFF;
		mbox->type3R.addr = rate_addr;
		status = myrb_exec_cmd(cb, cmd_blk);
		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
		mutex_unlock(&cb->dcmd_mutex);
	}
	if (status == MYRB_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
			    start ? "Initiated" : "Cancelled");
		return count;
	}
	if (!start) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Cancelled, status 0x%x\n",
			    status);
		return -EIO;
	}

	switch (status) {
	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
		msg = "Attempt to Rebuild Online or Unresponsive Drive";
		break;
	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
		msg = "New Disk Failed During Rebuild";
		break;
	case MYRB_STATUS_INVALID_ADDRESS:
		msg = "Invalid Device Address";
		break;
	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
		msg = "Already in Progress";
		break;
	default:
		msg = NULL;
		break;
	}
	if (msg)
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed - %s\n", msg);
	else
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed, status 0x%x\n", status);

	return -EIO;
}
static DEVICE_ATTR_RW(rebuild);
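/*
 * Usage sketch (the exact sysfs path is illustrative): writing "1" to
 * the rebuild attribute of a physical device starts a rebuild, and
 * writing "0" cancels a running one, e.g.
 *
 *   echo 1 > /sys/bus/scsi/devices/<h:c:t:l>/rebuild
 */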
static ssize_t consistency_check_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_rbld_progress rbld_buf;
	struct myrb_cmdblk *cmd_blk;
	union myrb_cmd_mbox *mbox;
	unsigned short ldev_num = 0xFFFF;
	unsigned short status;
	int rc, start;
	const char *msg;

	rc = kstrtoint(buf, 0, &start);
	if (rc)
		return rc;

	if (sdev->channel < myrb_logical_channel(sdev->host))
		return -ENXIO;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (start) {
		if (status == MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Check Consistency Not Initiated; already in progress\n");
			return -EALREADY;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
		mbox->type3C.id = MYRB_DCMD_TAG;
		mbox->type3C.ldev_num = sdev->id;
		mbox->type3C.auto_restore = true;

		status = myrb_exec_cmd(cb, cmd_blk);
		mutex_unlock(&cb->dcmd_mutex);
	} else {
		struct pci_dev *pdev = cb->pdev;
		unsigned char *rate;
		dma_addr_t rate_addr;

		if (ldev_num != sdev->id) {
			sdev_printk(KERN_INFO, sdev,
				    "Check Consistency Not Cancelled; not in progress\n");
			return 0;
		}
		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
					  &rate_addr, GFP_KERNEL);
		if (rate == NULL) {
			sdev_printk(KERN_INFO, sdev,
				    "Cancellation of Check Consistency Failed - Out of Memory\n");
			return -ENOMEM;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
		mbox->type3R.id = MYRB_DCMD_TAG;
		mbox->type3R.rbld_rate = 0xFF;
		mbox->type3R.addr = rate_addr;
		status = myrb_exec_cmd(cb, cmd_blk);
		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
		mutex_unlock(&cb->dcmd_mutex);
	}
	if (status == MYRB_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
			    start ? "Initiated" : "Cancelled");
		return count;
	}
	if (!start) {
		sdev_printk(KERN_INFO, sdev,
			    "Check Consistency Not Cancelled, status 0x%x\n",
			    status);
		return -EIO;
	}

	switch (status) {
	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
		msg = "Dependent Physical Device is DEAD";
		break;
	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
		msg = "New Disk Failed During Rebuild";
		break;
	case MYRB_STATUS_INVALID_ADDRESS:
		msg = "Invalid or Nonredundant Logical Drive";
		break;
	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
		msg = "Already in Progress";
		break;
	default:
		msg = NULL;
		break;
	}
	if (msg)
		sdev_printk(KERN_INFO, sdev,
			    "Check Consistency Failed - %s\n", msg);
	else
		sdev_printk(KERN_INFO, sdev,
			    "Check Consistency Failed, status 0x%x\n", status);

	return -EIO;
}
static ssize_t consistency_check_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return rebuild_show(dev, attr, buf);
}
static DEVICE_ATTR_RW(consistency_check);
static ssize_t ctlr_num_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 20, "%u\n", cb->ctlr_num);
}
static DEVICE_ATTR_RO(ctlr_num);

static ssize_t firmware_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", cb->fw_version);
}
static DEVICE_ATTR_RO(firmware);

static ssize_t model_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", cb->model_name);
}
static DEVICE_ATTR_RO(model);
static ssize_t flush_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);
	unsigned short status;

	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	if (status == MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_INFO, shost,
			     "Cache Flush Completed\n");
		return count;
	}
	shost_printk(KERN_INFO, shost,
		     "Cache Flush Failed, status %x\n", status);
	return -EIO;
}
static DEVICE_ATTR_WO(flush_cache);
static struct device_attribute *myrb_sdev_attrs[] = {
	&dev_attr_rebuild,
	&dev_attr_consistency_check,
	&dev_attr_raid_state,
	&dev_attr_raid_level,
	NULL,
};

static struct device_attribute *myrb_shost_attrs[] = {
	&dev_attr_ctlr_num,
	&dev_attr_model,
	&dev_attr_firmware,
	&dev_attr_flush_cache,
	NULL,
};
static struct scsi_host_template myrb_template = {
	.module = THIS_MODULE,
	.name = "DAC960",
	.proc_name = "myrb",
	.queuecommand = myrb_queuecommand,
	.eh_host_reset_handler = myrb_host_reset,
	.slave_alloc = myrb_slave_alloc,
	.slave_configure = myrb_slave_configure,
	.slave_destroy = myrb_slave_destroy,
	.bios_param = myrb_biosparam,
	.cmd_size = sizeof(struct myrb_cmdblk),
	.shost_attrs = myrb_shost_attrs,
	.sdev_attrs = myrb_sdev_attrs,
	.this_id = -1,
};
/**
 * myrb_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 */
static int myrb_is_raid(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return sdev->channel == myrb_logical_channel(sdev->host);
}
/**
 * myrb_get_resync - get raid volume resync percent complete
 * @dev: the device struct object
 */
static void myrb_get_resync(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_rbld_progress rbld_buf;
	unsigned int percent_complete = 0;
	unsigned short status;
	unsigned int ldev_size = 0, remaining = 0;

	if (sdev->channel < myrb_logical_channel(sdev->host))
		return;
	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_STATUS_SUCCESS) {
		if (rbld_buf.ldev_num == sdev->id) {
			ldev_size = rbld_buf.ldev_size;
			remaining = rbld_buf.blocks_left;
		}
	}
	if (remaining && ldev_size)
		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
	raid_set_resync(myrb_raid_template, dev, percent_complete);
}
/**
 * myrb_get_state - get raid volume status
 * @dev: the device struct object
 */
static void myrb_get_state(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_ldev_info *ldev_info = sdev->hostdata;
	enum raid_state state = RAID_STATE_UNKNOWN;
	unsigned short status;

	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
		state = RAID_STATE_UNKNOWN;
	else {
		status = myrb_get_rbld_progress(cb, NULL);
		if (status == MYRB_STATUS_SUCCESS)
			state = RAID_STATE_RESYNCING;
		else {
			switch (ldev_info->state) {
			case MYRB_DEVICE_ONLINE:
				state = RAID_STATE_ACTIVE;
				break;
			case MYRB_DEVICE_WO:
			case MYRB_DEVICE_CRITICAL:
				state = RAID_STATE_DEGRADED;
				break;
			default:
				state = RAID_STATE_OFFLINE;
			}
		}
	}
	raid_set_state(myrb_raid_template, dev, state);
}
static struct raid_function_template myrb_raid_functions = {
	.cookie = &myrb_template,
	.is_raid = myrb_is_raid,
	.get_resync = myrb_get_resync,
	.get_state = myrb_get_state,
};
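/*
 * raid_class hooks: logical drives are presented on the highest SCSI
 * channel (see myrb_logical_channel()), so myrb_is_raid() keys on the
 * channel number to decide whether a device is a RAID volume.
 */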
static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
		struct scsi_cmnd *scmd)
{
	unsigned short status;

	if (!cmd_blk)
		return;

	scsi_dma_unmap(scmd);

	if (cmd_blk->dcdb) {
		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
			      cmd_blk->dcdb_addr);
		cmd_blk->dcdb = NULL;
	}
	if (cmd_blk->sgl) {
		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
		cmd_blk->sgl = NULL;
		cmd_blk->sgl_addr = 0;
	}
	status = cmd_blk->status;
	switch (status) {
	case MYRB_STATUS_SUCCESS:
	case MYRB_STATUS_DEVICE_BUSY:
		scmd->result = (DID_OK << 16) | status;
		break;
	case MYRB_STATUS_BAD_DATA:
		dev_dbg(&scmd->device->sdev_gendev,
			"Bad Data Encountered\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
		else
			/* Write error */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
		break;
	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error, auto-reallocation failed */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
		else
			/* Write error, auto-reallocation failed */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
		break;
	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
		dev_dbg(&scmd->device->sdev_gendev,
			"Logical Drive Nonexistent or Offline");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
		dev_dbg(&scmd->device->sdev_gendev,
			"Attempt to Access Beyond End of Logical Drive");
		/* Logical block address out of range */
		scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0);
		break;
	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	default:
		scmd_printk(KERN_ERR, scmd,
			    "Unexpected Error Status %04X", status);
		scmd->result = (DID_ERROR << 16);
		break;
	}
	scmd->scsi_done(scmd);
}
static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	if (!cmd_blk)
		return;

	if (cmd_blk->completion) {
		complete(cmd_blk->completion);
		cmd_blk->completion = NULL;
	}
}
static void myrb_monitor(struct work_struct *work)
{
	struct myrb_hba *cb = container_of(work,
			struct myrb_hba, monitor_work.work);
	struct Scsi_Host *shost = cb->host;
	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;

	dev_dbg(&shost->shost_gendev, "monitor tick\n");

	if (cb->new_ev_seq > cb->old_ev_seq) {
		int event = cb->old_ev_seq;

		dev_dbg(&shost->shost_gendev,
			"get event log no %d/%d\n",
			cb->new_ev_seq, event);
		myrb_get_event(cb, event);
		cb->old_ev_seq = event + 1;
		interval = 10;
	} else if (cb->need_err_info) {
		cb->need_err_info = false;
		dev_dbg(&shost->shost_gendev, "get error table\n");
		myrb_get_errtable(cb);
		interval = 10;
	} else if (cb->need_rbld && cb->rbld_first) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_ldev_info) {
		cb->need_ldev_info = false;
		dev_dbg(&shost->shost_gendev,
			"get logical drive info\n");
		myrb_get_ldev_info(cb);
		interval = 10;
	} else if (cb->need_rbld) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_cc_status) {
		cb->need_cc_status = false;
		dev_dbg(&shost->shost_gendev,
			"get consistency check progress\n");
		myrb_get_cc_progress(cb);
		interval = 10;
	} else if (cb->need_bgi_status) {
		cb->need_bgi_status = false;
		dev_dbg(&shost->shost_gendev, "get background init status\n");
		myrb_bgi_control(cb);
		interval = 10;
	} else {
		dev_dbg(&shost->shost_gendev, "new enquiry\n");
		mutex_lock(&cb->dma_mutex);
		myrb_hba_enquiry(cb);
		mutex_unlock(&cb->dma_mutex);
		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
		    cb->need_err_info || cb->need_rbld ||
		    cb->need_ldev_info || cb->need_cc_status ||
		    cb->need_bgi_status) {
			dev_dbg(&shost->shost_gendev,
				"reschedule monitor\n");
			interval = 0;
		}
	}
	if (interval > 1)
		cb->primary_monitor_time = jiffies;
	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}
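/*
 * The monitor work services at most one pending condition per tick.
 * Flags such as need_rbld or need_cc_status are raised from the command
 * completion paths; while any work remains the tick is rescheduled
 * after a short delay, otherwise the periodic enquiry runs at
 * MYRB_PRIMARY_MONITOR_INTERVAL.
 */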
/**
 * myrb_err_status - reports controller BIOS messages
 *
 * Controller BIOS messages are passed through the Error Status Register
 * when the driver performs the BIOS handshaking.
 *
 * Return: true for fatal errors and false otherwise.
 */
static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
		unsigned char parm0, unsigned char parm1)
{
	struct pci_dev *pdev = cb->pdev;

	switch (error) {
	case 0x00:
		dev_info(&pdev->dev,
			 "Physical Device %d:%d Not Responding\n",
			 parm1, parm0);
		break;
	case 0x08:
		dev_notice(&pdev->dev, "Spinning Up Drives\n");
		break;
	case 0x30:
		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
		break;
	case 0x60:
		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
		break;
	case 0x70:
		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
		break;
	case 0x90:
		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
			   parm1, parm0);
		break;
	case 0xA0:
		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
		break;
	case 0xB0:
		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
		break;
	case 0xD0:
		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
		break;
	case 0xF0:
		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
		return true;
	default:
		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
			error);
		return true;
	}
	return false;
}
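/*
 * Each of the controller families that follow (LA, PG, PD and P)
 * provides the same set of primitives: inbound/outbound doorbell
 * helpers, a polled mailbox handshake used during initialisation, a
 * hw_init routine that wires up the myrb_hba method pointers, and an
 * interrupt handler for command completions.
 */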
/*
 * Hardware-specific functions
 */

/*
 * DAC960 LA Series Controllers
 */
static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}

static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
}

static inline bool DAC960_LA_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_INIT_DONE);
}

static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}

static inline void DAC960_LA_ack_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_LA_ODB_OFFSET);
}

static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_LA_enable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}

static inline void DAC960_LA_disable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}
static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}
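/*
 * Word 0 of the mailbox carries the opcode and command ID, which is
 * what the firmware looks at to detect a new command; it is written
 * last, after the barrier, so the firmware can never observe a
 * partially filled mailbox slot.
 */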
static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
}

static inline unsigned short DAC960_LA_read_status(void __iomem *base)
{
	return readw(base + DAC960_LA_STS_OFFSET);
}
static inline bool
DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);

	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_LA_ERRSTS_PENDING;

	*error = errsts;
	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
	return true;
}
static inline unsigned short
DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_LA_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_LA_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_LA_write_hw_mbox(base, mbox);
	DAC960_LA_hw_mbox_new_cmd(base);
	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_LA_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_LA_read_status(base);
	DAC960_LA_ack_hw_mbox_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);

	return status;
}
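/*
 * The mbox_init routine runs before interrupts are enabled, hence the
 * polled handshake: wait for the hardware mailbox to drain, post the
 * command, then poll the outbound doorbell for the status.
 */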
static int DAC960_LA_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_LA_disable_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_LA_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_LA_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_LA_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_LA_disable_intr;
	cb->reset = DAC960_LA_reset_ctrl;

	return 0;
}
static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_LA_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
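/*
 * Completion statuses arrive through a DMA-coherent ring of
 * myrb_stat_mbox entries; the 'valid' flag marks fresh entries.  Each
 * consumed slot is cleared and the cursor wraps from last_stat_mbox
 * back to first_stat_mbox.  IDs below 3 identify the driver's internal
 * command blocks (MYRB_DCMD_TAG and MYRB_MCMD_TAG), so SCSI command
 * tags are offset by 3.
 */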
static struct myrb_privdata DAC960_LA_privdata = {
	.hw_init = DAC960_LA_hw_init,
	.irq_handler = DAC960_LA_intr_handler,
	.mmio_size = DAC960_LA_mmio_size,
};
/*
 * DAC960 PG Series Controllers
 */
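/*
 * The PG handshake mirrors the LA series, but the doorbell and mask
 * registers are 32 bits wide and accessed with readl()/writel() rather
 * than readb()/writeb().
 */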
static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_reset_ctrl(void __iomem *base)
{
	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}

static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_HWMBOX_FULL;
}

static inline bool DAC960_PG_init_in_progress(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}

static inline void DAC960_PG_ack_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_PG_ODB_OFFSET);
}

static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_PG_enable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}

static inline void DAC960_PG_disable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}
static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}
static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
}

static inline unsigned short
DAC960_PG_read_status(void __iomem *base)
{
	return readw(base + DAC960_PG_STS_OFFSET);
}
static inline bool
DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PG_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
	return true;
}
static inline unsigned short
DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_PG_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_PG_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_PG_write_hw_mbox(base, mbox);
	DAC960_PG_hw_mbox_new_cmd(base);

	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_PG_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_PG_read_status(base);
	DAC960_PG_ack_hw_mbox_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);

	return status;
}
static int DAC960_PG_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_PG_disable_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PG_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PG_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PG_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_PG_disable_intr;
	cb->reset = DAC960_PG_reset_ctrl;

	return 0;
}
static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_PG_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
static struct myrb_privdata DAC960_PG_privdata = {
	.hw_init = DAC960_PG_hw_init,
	.irq_handler = DAC960_PG_intr_handler,
	.mmio_size = DAC960_PG_mmio_size,
};
/*
 * DAC960 PD Series Controllers
 */
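/*
 * PD controllers have no memory mailbox: myrb_enable_mmio() is called
 * without an mbox_init routine and every command is written directly
 * to the hardware mailbox through the I/O port window, which is why
 * hw_init also claims an I/O region.
 */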
static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
}

static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_HWMBOX_FULL;
}

static inline bool DAC960_PD_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_PD_ack_intr(void __iomem *base)
{
	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
}

static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);

	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_PD_enable_intr(void __iomem *base)
{
	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
}

static inline void DAC960_PD_disable_intr(void __iomem *base)
{
	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
}
static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
}

static inline unsigned char
DAC960_PD_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PD_STSID_OFFSET);
}

static inline unsigned short
DAC960_PD_read_status(void __iomem *base)
{
	return readw(base + DAC960_PD_STS_OFFSET);
}

static inline bool
DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PD_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
	return true;
}
static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
static int DAC960_PD_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_PD_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
static struct myrb_privdata DAC960_PD_privdata = {
	.hw_init = DAC960_PD_hw_init,
	.irq_handler = DAC960_PD_intr_handler,
	.mmio_size = DAC960_PD_mmio_size,
};
/*
 * DAC960 P Series Controllers
 *
 * Similar to the DAC960 PD Series Controllers, but some commands have
 * to be translated.
 */
static inline void myrb_translate_enquiry(void *enq)
{
	memcpy(enq + 132, enq + 36, 64);
	memset(enq + 36, 0, 96);
}

static inline void myrb_translate_devstate(void *state)
{
	memcpy(state + 2, state + 3, 1);
	memmove(state + 4, state + 5, 2);
	memmove(state + 6, state + 8, 4);
}

static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->type5.ld.ldev_num;

	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= mbox->bytes[7] << 6;
	mbox->bytes[7] = ldev_num;
}

static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->bytes[7];

	mbox->bytes[7] = mbox->bytes[3] >> 6;
	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= ldev_num << 3;
}
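/*
 * The old (P series) read/write mailbox layout keeps the logical drive
 * number in byte 7 and packs two extra addressing bits into the top of
 * byte 3; the new layout keeps those two bits in byte 7 and the drive
 * number in bits 3-7 of byte 3.  The two helpers above repack the
 * fields on submission and reverse the transformation on completion.
 */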
static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	switch (mbox->common.opcode) {
	case MYRB_CMD_ENQUIRY:
		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
		break;
	case MYRB_CMD_GET_DEVICE_STATE:
		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
		break;
	case MYRB_CMD_READ:
		mbox->common.opcode = MYRB_CMD_READ_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE:
		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_READ_SG:
		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE_SG:
		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	default:
		break;
	}
	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
static int DAC960_P_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to allocate DMA mapped memory\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_P_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;
		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (!cmd_blk)
			continue;

		mbox = &cmd_blk->mbox;
		op = mbox->common.opcode;
		switch (op) {
		case MYRB_CMD_ENQUIRY_OLD:
			mbox->common.opcode = MYRB_CMD_ENQUIRY;
			myrb_translate_enquiry(cb->enquiry);
			break;
		case MYRB_CMD_READ_OLD:
			mbox->common.opcode = MYRB_CMD_READ;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_READ_SG_OLD:
			mbox->common.opcode = MYRB_CMD_READ_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_SG_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		default:
			break;
		}
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
static struct myrb_privdata DAC960_P_privdata = {
	.hw_init = DAC960_P_hw_init,
	.irq_handler = DAC960_P_intr_handler,
	.mmio_size = DAC960_PD_mmio_size,
};
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;
	cb->host = shost;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		scsi_host_put(shost);
		return NULL;
	}

	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else
		cb->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrb_cleanup(cb);
	return NULL;
}
static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
{
	struct myrb_hba *cb;
	int ret;

	cb = myrb_detect(dev, entry);
	if (!cb)
		return -ENODEV;

	ret = myrb_get_hba_config(cb);
	if (ret < 0) {
		myrb_cleanup(cb);
		return ret;
	}

	if (!myrb_create_mempools(dev, cb)) {
		ret = -ENOMEM;
		goto failed;
	}

	ret = scsi_add_host(cb->host, &dev->dev);
	if (ret) {
		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
		myrb_destroy_mempools(cb);
		goto failed;
	}
	scsi_scan_host(cb->host);
	return 0;
failed:
	myrb_cleanup(cb);
	return ret;
}
static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}
static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data = (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrb_id_table);
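/*
 * The LA series entry matches on the PCI IDs of the board's DEC 21285
 * bridge, so it is qualified by subsystem vendor/device ID; the other
 * variants use plain Mylex vendor/device matching.
 */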
static struct pci_driver myrb_pci_driver = {
	.name = "myrb",
	.id_table = myrb_id_table,
	.probe = myrb_probe,
	.remove = myrb_remove,
};
static int __init myrb_init_module(void)
{
	int ret;

	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
	if (!myrb_raid_template)
		return -ENODEV;

	ret = pci_register_driver(&myrb_pci_driver);
	if (ret)
		raid_class_release(myrb_raid_template);

	return ret;
}
static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}
module_init(myrb_init_module);
module_exit(myrb_cleanup_module);
MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");