GNU Linux-libre 5.4.200-gnu1
[releases.git] / drivers / scsi / myrb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4  *
5  * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
6  *
7  * Based on the original DAC960 driver,
8  * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
9  * Portions Copyright 2002 by Mylex (An IBM Business Unit)
10  *
11  */
12
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/pci.h>
18 #include <linux/raid_class.h>
19 #include <asm/unaligned.h>
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_host.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_tcq.h>
25 #include "myrb.h"
26
/* RAID class template registered for this driver; set up at module init. */
static struct raid_template *myrb_raid_template;

/* Forward declarations for routines defined later in this file. */
static void myrb_monitor(struct work_struct *work);
static inline void myrb_translate_devstate(void *DeviceState);
31
32 static inline int myrb_logical_channel(struct Scsi_Host *shost)
33 {
34         return shost->max_channel - 1;
35 }
36
/* Lookup table translating device states into printable names. */
static struct myrb_devstate_name_entry {
        enum myrb_devstate state;
        const char *name;
} myrb_devstate_name_list[] = {
        { MYRB_DEVICE_DEAD, "Dead" },
        { MYRB_DEVICE_WO, "WriteOnly" },
        { MYRB_DEVICE_ONLINE, "Online" },
        { MYRB_DEVICE_CRITICAL, "Critical" },
        { MYRB_DEVICE_STANDBY, "Standby" },
        { MYRB_DEVICE_OFFLINE, "Offline" },
};
48
49 static const char *myrb_devstate_name(enum myrb_devstate state)
50 {
51         struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
52         int i;
53
54         for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
55                 if (entry[i].state == state)
56                         return entry[i].name;
57         }
58         return "Unknown";
59 }
60
/* Lookup table translating RAID levels into printable names. */
static struct myrb_raidlevel_name_entry {
        enum myrb_raidlevel level;
        const char *name;
} myrb_raidlevel_name_list[] = {
        { MYRB_RAID_LEVEL0, "RAID0" },
        { MYRB_RAID_LEVEL1, "RAID1" },
        { MYRB_RAID_LEVEL3, "RAID3" },
        { MYRB_RAID_LEVEL5, "RAID5" },
        { MYRB_RAID_LEVEL6, "RAID6" },
        { MYRB_RAID_JBOD, "JBOD" },
};
72
73 static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
74 {
75         struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
76         int i;
77
78         for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
79                 if (entry[i].level == level)
80                         return entry[i].name;
81         }
82         return NULL;
83 }
84
85 /**
86  * myrb_create_mempools - allocates auxiliary data structures
87  *
88  * Return: true on success, false otherwise.
89  */
90 static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
91 {
92         size_t elem_size, elem_align;
93
94         elem_align = sizeof(struct myrb_sge);
95         elem_size = cb->host->sg_tablesize * elem_align;
96         cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
97                                       elem_size, elem_align, 0);
98         if (cb->sg_pool == NULL) {
99                 shost_printk(KERN_ERR, cb->host,
100                              "Failed to allocate SG pool\n");
101                 return false;
102         }
103
104         cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
105                                        sizeof(struct myrb_dcdb),
106                                        sizeof(unsigned int), 0);
107         if (!cb->dcdb_pool) {
108                 dma_pool_destroy(cb->sg_pool);
109                 cb->sg_pool = NULL;
110                 shost_printk(KERN_ERR, cb->host,
111                              "Failed to allocate DCDB pool\n");
112                 return false;
113         }
114
115         snprintf(cb->work_q_name, sizeof(cb->work_q_name),
116                  "myrb_wq_%d", cb->host->host_no);
117         cb->work_q = create_singlethread_workqueue(cb->work_q_name);
118         if (!cb->work_q) {
119                 dma_pool_destroy(cb->dcdb_pool);
120                 cb->dcdb_pool = NULL;
121                 dma_pool_destroy(cb->sg_pool);
122                 cb->sg_pool = NULL;
123                 shost_printk(KERN_ERR, cb->host,
124                              "Failed to create workqueue\n");
125                 return false;
126         }
127
128         /*
129          * Initialize the Monitoring Timer.
130          */
131         INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
132         queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
133
134         return true;
135 }
136
137 /**
138  * myrb_destroy_mempools - tears down the memory pools for the controller
139  */
140 static void myrb_destroy_mempools(struct myrb_hba *cb)
141 {
142         cancel_delayed_work_sync(&cb->monitor_work);
143         destroy_workqueue(cb->work_q);
144
145         dma_pool_destroy(cb->sg_pool);
146         dma_pool_destroy(cb->dcdb_pool);
147 }
148
149 /**
150  * myrb_reset_cmd - reset command block
151  */
152 static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
153 {
154         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
155
156         memset(mbox, 0, sizeof(union myrb_cmd_mbox));
157         cmd_blk->status = 0;
158 }
159
/**
 * myrb_qcmd - queues command block for execution
 *
 * Copies the command mailbox into the next free slot of the memory
 * mailbox ring and advances the ring bookkeeping.  Caller must hold
 * cb->queue_lock (see myrb_exec_cmd()).
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
        void __iomem *base = cb->io_base;
        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
        union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

        cb->write_cmd_mbox(next_mbox, mbox);
        /*
         * Only poke the controller (via the hw-specific get_cmd_mbox
         * hook) when one of the two most recently submitted slots has
         * been consumed (words[0] == 0) -- presumably the controller is
         * otherwise still scanning the ring on its own.
         */
        if (cb->prev_cmd_mbox1->words[0] == 0 ||
            cb->prev_cmd_mbox2->words[0] == 0)
                cb->get_cmd_mbox(base);
        /* Remember the last two submissions for the check above. */
        cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
        cb->prev_cmd_mbox1 = next_mbox;
        /* Advance the write pointer, wrapping at the end of the ring. */
        if (++next_mbox > cb->last_cmd_mbox)
                next_mbox = cb->first_cmd_mbox;
        cb->next_cmd_mbox = next_mbox;
}
179
180 /**
181  * myrb_exec_cmd - executes command block and waits for completion.
182  *
183  * Return: command status
184  */
185 static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
186                 struct myrb_cmdblk *cmd_blk)
187 {
188         DECLARE_COMPLETION_ONSTACK(cmpl);
189         unsigned long flags;
190
191         cmd_blk->completion = &cmpl;
192
193         spin_lock_irqsave(&cb->queue_lock, flags);
194         cb->qcmd(cb, cmd_blk);
195         spin_unlock_irqrestore(&cb->queue_lock, flags);
196
197         WARN_ON(in_interrupt());
198         wait_for_completion(&cmpl);
199         return cmd_blk->status;
200 }
201
202 /**
203  * myrb_exec_type3 - executes a type 3 command and waits for completion.
204  *
205  * Return: command status
206  */
207 static unsigned short myrb_exec_type3(struct myrb_hba *cb,
208                 enum myrb_cmd_opcode op, dma_addr_t addr)
209 {
210         struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
211         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
212         unsigned short status;
213
214         mutex_lock(&cb->dcmd_mutex);
215         myrb_reset_cmd(cmd_blk);
216         mbox->type3.id = MYRB_DCMD_TAG;
217         mbox->type3.opcode = op;
218         mbox->type3.addr = addr;
219         status = myrb_exec_cmd(cb, cmd_blk);
220         mutex_unlock(&cb->dcmd_mutex);
221         return status;
222 }
223
/**
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 * @cb: pointer to the hba structure
 * @op: type 3D opcode to execute
 * @sdev: device addressed via its channel/target
 * @pdev_info: buffer receiving the physical device state
 *
 * Maps @pdev_info for DMA, executes the command under the shared
 * direct-command block, and converts old-format device state data
 * where necessary.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
                enum myrb_cmd_opcode op, struct scsi_device *sdev,
                struct myrb_pdev_state *pdev_info)
{
        struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
        unsigned short status;
        dma_addr_t pdev_info_addr;

        pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
                                        sizeof(struct myrb_pdev_state),
                                        DMA_FROM_DEVICE);
        if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
                return MYRB_STATUS_SUBSYS_FAILED;

        /* The direct-command block is shared; serialize via dcmd_mutex. */
        mutex_lock(&cb->dcmd_mutex);
        myrb_reset_cmd(cmd_blk);
        mbox->type3D.id = MYRB_DCMD_TAG;
        mbox->type3D.opcode = op;
        mbox->type3D.channel = sdev->channel;
        mbox->type3D.target = sdev->id;
        mbox->type3D.addr = pdev_info_addr;
        status = myrb_exec_cmd(cb, cmd_blk);
        mutex_unlock(&cb->dcmd_mutex);
        dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
                         sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
        /*
         * The _OLD variant apparently returns data in a legacy layout;
         * myrb_translate_devstate() converts it in place.
         */
        if (status == MYRB_STATUS_SUCCESS &&
            mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
                myrb_translate_devstate(pdev_info);

        return status;
}
261
/*
 * Drive-kill reason strings, indexed by the ASCQ byte of vendor-specific
 * sense data (sense key VENDOR_SPECIFIC, ASC 0x80) -- see myrb_get_event().
 */
static char *myrb_event_msg[] = {
        "killed because write recovery failed",
        "killed because of SCSI bus reset failure",
        "killed because of double check condition",
        "killed because it was removed",
        "killed because of gross error on SCSI chip",
        "killed because of bad tag returned from drive",
        "killed because of timeout on SCSI command",
        "killed because of reset SCSI command issued from system",
        "killed because busy or parity error count exceeded limit",
        "killed because of 'kill drive' command from system",
        "killed because of selection timeout",
        "killed due to SCSI phase sequence error",
        "killed due to unknown status",
};
277
/**
 * myrb_get_event - get event log from HBA
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Execute a type 3E command and logs the event message
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
        struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
        struct myrb_log_entry *ev_buf;
        dma_addr_t ev_addr;
        unsigned short status;

        ev_buf = dma_alloc_coherent(&cb->pdev->dev,
                                    sizeof(struct myrb_log_entry),
                                    &ev_addr, GFP_KERNEL);
        if (!ev_buf)
                return; /* best effort only; nothing to report to */

        myrb_reset_cmd(cmd_blk);
        mbox->type3E.id = MYRB_MCMD_TAG;
        mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
        mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
        mbox->type3E.opqual = 1;
        mbox->type3E.ev_seq = event;
        mbox->type3E.addr = ev_addr;
        status = myrb_exec_cmd(cb, cmd_blk);
        if (status != MYRB_STATUS_SUCCESS)
                shost_printk(KERN_INFO, cb->host,
                             "Failed to get event log %d, status %04x\n",
                             event, status);

        /* Only log if the controller returned the sequence we asked for. */
        else if (ev_buf->seq_num == event) {
                struct scsi_sense_hdr sshdr;

                memset(&sshdr, 0, sizeof(sshdr));
                scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

                /*
                 * Vendor-specific sense 0x80/<ascq> selects a drive-kill
                 * reason from myrb_event_msg; anything else is logged raw.
                 */
                if (sshdr.sense_key == VENDOR_SPECIFIC &&
                    sshdr.asc == 0x80 &&
                    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
                        shost_printk(KERN_CRIT, cb->host,
                                     "Physical drive %d:%d: %s\n",
                                     ev_buf->channel, ev_buf->target,
                                     myrb_event_msg[sshdr.ascq]);
                else
                        shost_printk(KERN_CRIT, cb->host,
                                     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
                                     ev_buf->channel, ev_buf->target,
                                     sshdr.sense_key, sshdr.asc, sshdr.ascq);
        }

        dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
                          ev_buf, ev_addr);
}
335
336 /**
337  * myrb_get_errtable - retrieves the error table from the controller
338  *
339  * Executes a type 3 command and logs the error table from the controller.
340  */
341 static void myrb_get_errtable(struct myrb_hba *cb)
342 {
343         struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
344         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
345         unsigned short status;
346         struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
347
348         memcpy(&old_table, cb->err_table, sizeof(old_table));
349
350         myrb_reset_cmd(cmd_blk);
351         mbox->type3.id = MYRB_MCMD_TAG;
352         mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
353         mbox->type3.addr = cb->err_table_addr;
354         status = myrb_exec_cmd(cb, cmd_blk);
355         if (status == MYRB_STATUS_SUCCESS) {
356                 struct myrb_error_entry *table = cb->err_table;
357                 struct myrb_error_entry *new, *old;
358                 size_t err_table_offset;
359                 struct scsi_device *sdev;
360
361                 shost_for_each_device(sdev, cb->host) {
362                         if (sdev->channel >= myrb_logical_channel(cb->host))
363                                 continue;
364                         err_table_offset = sdev->channel * MYRB_MAX_TARGETS
365                                 + sdev->id;
366                         new = table + err_table_offset;
367                         old = &old_table[err_table_offset];
368                         if (new->parity_err == old->parity_err &&
369                             new->soft_err == old->soft_err &&
370                             new->hard_err == old->hard_err &&
371                             new->misc_err == old->misc_err)
372                                 continue;
373                         sdev_printk(KERN_CRIT, sdev,
374                                     "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
375                                     new->parity_err, new->soft_err,
376                                     new->hard_err, new->misc_err);
377                 }
378         }
379 }
380
/**
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 *
 * Executes a type 3 command and updates the logical device table.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
        unsigned short status;
        int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
        struct Scsi_Host *shost = cb->host;

        status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
                                 cb->ldev_info_addr);
        if (status != MYRB_STATUS_SUCCESS)
                return status;

        /* Reconcile the freshly fetched table with the attached sdevs. */
        for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
                struct myrb_ldev_info *old = NULL;
                struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
                struct scsi_device *sdev;

                sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
                                          ldev_num, 0);
                if (!sdev) {
                        /* Not attached yet: announce it unless offline. */
                        if (new->state == MYRB_DEVICE_OFFLINE)
                                continue;
                        shost_printk(KERN_INFO, shost,
                                     "Adding Logical Drive %d in state %s\n",
                                     ldev_num, myrb_devstate_name(new->state));
                        scsi_add_device(shost, myrb_logical_channel(shost),
                                        ldev_num, 0);
                        continue;
                }
                /* Already attached: log state/cache-mode transitions. */
                old = sdev->hostdata;
                if (new->state != old->state)
                        shost_printk(KERN_INFO, shost,
                                     "Logical Drive %d is now %s\n",
                                     ldev_num, myrb_devstate_name(new->state));
                if (new->wb_enabled != old->wb_enabled)
                        sdev_printk(KERN_INFO, sdev,
                                    "Logical Drive is now WRITE %s\n",
                                    (new->wb_enabled ? "BACK" : "THRU"));
                /* Cache the new info in the sdev's hostdata. */
                memcpy(old, new, sizeof(*new));
                /* Drop the reference taken by scsi_device_lookup(). */
                scsi_device_put(sdev);
        }
        return status;
}
430
431 /**
432  * myrb_get_rbld_progress - get rebuild progress information
433  *
434  * Executes a type 3 command and returns the rebuild progress
435  * information.
436  *
437  * Return: command status
438  */
439 static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
440                 struct myrb_rbld_progress *rbld)
441 {
442         struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
443         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
444         struct myrb_rbld_progress *rbld_buf;
445         dma_addr_t rbld_addr;
446         unsigned short status;
447
448         rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
449                                       sizeof(struct myrb_rbld_progress),
450                                       &rbld_addr, GFP_KERNEL);
451         if (!rbld_buf)
452                 return MYRB_STATUS_RBLD_NOT_CHECKED;
453
454         myrb_reset_cmd(cmd_blk);
455         mbox->type3.id = MYRB_MCMD_TAG;
456         mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
457         mbox->type3.addr = rbld_addr;
458         status = myrb_exec_cmd(cb, cmd_blk);
459         if (rbld)
460                 memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
461         dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
462                           rbld_buf, rbld_addr);
463         return status;
464 }
465
/**
 * myrb_update_rbld_progress - updates the rebuild status
 *
 * Updates the rebuild status for the attached logical devices.
 *
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
        struct myrb_rbld_progress rbld_buf;
        unsigned short status;

        status = myrb_get_rbld_progress(cb, &rbld_buf);
        /*
         * A rebuild was running on the previous poll and the controller
         * now reports none in progress: treat it as having completed.
         */
        if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
            cb->last_rbld_status == MYRB_STATUS_SUCCESS)
                status = MYRB_STATUS_RBLD_SUCCESS;
        if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
                unsigned int blocks_done =
                        rbld_buf.ldev_size - rbld_buf.blocks_left;
                struct scsi_device *sdev;

                sdev = scsi_device_lookup(cb->host,
                                          myrb_logical_channel(cb->host),
                                          rbld_buf.ldev_num, 0);
                /*
                 * NOTE(review): returning here skips the
                 * cb->last_rbld_status update at the bottom of this
                 * function - confirm that is intended.
                 */
                if (!sdev)
                        return;

                switch (status) {
                case MYRB_STATUS_SUCCESS:
                        /*
                         * Both values are shifted by 7, presumably to keep
                         * 100 * blocks_done within 32 bits for big drives.
                         */
                        sdev_printk(KERN_INFO, sdev,
                                    "Rebuild in Progress, %d%% completed\n",
                                    (100 * (blocks_done >> 7))
                                    / (rbld_buf.ldev_size >> 7));
                        break;
                case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
                        sdev_printk(KERN_INFO, sdev,
                                    "Rebuild Failed due to Logical Drive Failure\n");
                        break;
                case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
                        sdev_printk(KERN_INFO, sdev,
                                    "Rebuild Failed due to Bad Blocks on Other Drives\n");
                        break;
                case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
                        sdev_printk(KERN_INFO, sdev,
                                    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
                        break;
                case MYRB_STATUS_RBLD_SUCCESS:
                        sdev_printk(KERN_INFO, sdev,
                                    "Rebuild Completed Successfully\n");
                        break;
                case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
                        sdev_printk(KERN_INFO, sdev,
                                     "Rebuild Successfully Terminated\n");
                        break;
                default:
                        break;
                }
                scsi_device_put(sdev);
        }
        /* Remember the status for the completion detection above. */
        cb->last_rbld_status = status;
}
526
/**
 * myrb_get_cc_progress - retrieve the rebuild status
 *
 * Execute a type 3 Command and fetch the rebuild / consistency check
 * status.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
        struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
        struct myrb_rbld_progress *rbld_buf;
        dma_addr_t rbld_addr;
        unsigned short status;

        rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
                                      sizeof(struct myrb_rbld_progress),
                                      &rbld_addr, GFP_KERNEL);
        if (!rbld_buf) {
                /* Retry from the next monitor run. */
                cb->need_cc_status = true;
                return;
        }
        myrb_reset_cmd(cmd_blk);
        mbox->type3.id = MYRB_MCMD_TAG;
        mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
        mbox->type3.addr = rbld_addr;
        status = myrb_exec_cmd(cb, cmd_blk);
        if (status == MYRB_STATUS_SUCCESS) {
                unsigned int ldev_num = rbld_buf->ldev_num;
                unsigned int ldev_size = rbld_buf->ldev_size;
                unsigned int blocks_done =
                        ldev_size - rbld_buf->blocks_left;
                struct scsi_device *sdev;

                sdev = scsi_device_lookup(cb->host,
                                          myrb_logical_channel(cb->host),
                                          ldev_num, 0);
                if (sdev) {
                        /* >> 7 on both terms scales the percentage math. */
                        sdev_printk(KERN_INFO, sdev,
                                    "Consistency Check in Progress: %d%% completed\n",
                                    (100 * (blocks_done >> 7))
                                    / (ldev_size >> 7));
                        scsi_device_put(sdev);
                }
        }
        dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
                          rbld_buf, rbld_addr);
}
574
/**
 * myrb_bgi_control - updates background initialisation status
 *
 * Executes a type 3B command and updates the background initialisation status
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
        struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
        struct myrb_bgi_status *bgi, *last_bgi;
        dma_addr_t bgi_addr;
        struct scsi_device *sdev = NULL;
        unsigned short status;

        bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
                                 &bgi_addr, GFP_KERNEL);
        if (!bgi) {
                shost_printk(KERN_ERR, cb->host,
                             "Failed to allocate bgi memory\n");
                return;
        }
        myrb_reset_cmd(cmd_blk);
        mbox->type3B.id = MYRB_DCMD_TAG;
        mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
        mbox->type3B.optype = 0x20;     /* BGI status query sub-operation */
        mbox->type3B.addr = bgi_addr;
        status = myrb_exec_cmd(cb, cmd_blk);
        last_bgi = &cb->bgi_status;
        /*
         * NOTE(review): bgi->ldev_num is consulted before status is
         * checked; the lookup relies on the buffer contents being sane
         * even for non-SUCCESS status - confirm this is intended.
         */
        sdev = scsi_device_lookup(cb->host,
                                  myrb_logical_channel(cb->host),
                                  bgi->ldev_num, 0);
        switch (status) {
        case MYRB_STATUS_SUCCESS:
                switch (bgi->status) {
                case MYRB_BGI_INVALID:
                        break;
                case MYRB_BGI_STARTED:
                        if (!sdev)
                                break;
                        sdev_printk(KERN_INFO, sdev,
                                    "Background Initialization Started\n");
                        break;
                case MYRB_BGI_INPROGRESS:
                        if (!sdev)
                                break;
                        /* Suppress repeats when nothing progressed. */
                        if (bgi->blocks_done == last_bgi->blocks_done &&
                            bgi->ldev_num == last_bgi->ldev_num)
                                break;
                        sdev_printk(KERN_INFO, sdev,
                                 "Background Initialization in Progress: %d%% completed\n",
                                 (100 * (bgi->blocks_done >> 7))
                                 / (bgi->ldev_size >> 7));
                        break;
                case MYRB_BGI_SUSPENDED:
                        if (!sdev)
                                break;
                        sdev_printk(KERN_INFO, sdev,
                                    "Background Initialization Suspended\n");
                        break;
                case MYRB_BGI_CANCELLED:
                        if (!sdev)
                                break;
                        sdev_printk(KERN_INFO, sdev,
                                    "Background Initialization Cancelled\n");
                        break;
                }
                /* Remember the snapshot for the progress check above. */
                memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
                break;
        case MYRB_STATUS_BGI_SUCCESS:
                if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
                        sdev_printk(KERN_INFO, sdev,
                                    "Background Initialization Completed Successfully\n");
                cb->bgi_status.status = MYRB_BGI_INVALID;
                break;
        case MYRB_STATUS_BGI_ABORTED:
                if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
                        sdev_printk(KERN_INFO, sdev,
                                    "Background Initialization Aborted\n");
                /* Fallthrough */
        case MYRB_STATUS_NO_BGI_INPROGRESS:
                cb->bgi_status.status = MYRB_BGI_INVALID;
                break;
        }
        if (sdev)
                scsi_device_put(sdev);
        dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
                          bgi, bgi_addr);
}
663
/**
 * myrb_hba_enquiry - updates the controller status
 *
 * Executes a DAC_V1_Enquiry command, diffs the result against the
 * previous enquiry data and sets the cb->need_* flags that drive the
 * monitor work.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
        struct myrb_enquiry old, *new;
        unsigned short status;

        /* Snapshot the previous data so changes can be detected below. */
        memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

        status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
        if (status != MYRB_STATUS_SUCCESS)
                return status;

        new = cb->enquiry;
        /* Announce logical drives that appeared since the last poll. */
        if (new->ldev_count > old.ldev_count) {
                int ldev_num = old.ldev_count - 1;

                while (++ldev_num < new->ldev_count)
                        shost_printk(KERN_CRIT, cb->host,
                                     "Logical Drive %d Now Exists\n",
                                     ldev_num);
        }
        /* ...and those that disappeared. */
        if (new->ldev_count < old.ldev_count) {
                int ldev_num = new->ldev_count - 1;

                while (++ldev_num < old.ldev_count)
                        shost_printk(KERN_CRIT, cb->host,
                                     "Logical Drive %d No Longer Exists\n",
                                     ldev_num);
        }
        if (new->status.deferred != old.status.deferred)
                shost_printk(KERN_CRIT, cb->host,
                             "Deferred Write Error Flag is now %s\n",
                             (new->status.deferred ? "TRUE" : "FALSE"));
        /* New events in the controller log: schedule error-info fetch. */
        if (new->ev_seq != old.ev_seq) {
                cb->new_ev_seq = new->ev_seq;
                cb->need_err_info = true;
                shost_printk(KERN_INFO, cb->host,
                             "Event log %d/%d (%d/%d) available\n",
                             cb->old_ev_seq, cb->new_ev_seq,
                             old.ev_seq, new->ev_seq);
        }
        /* Any change in drive health counts: refresh the ldev table. */
        if ((new->ldev_critical > 0 &&
             new->ldev_critical != old.ldev_critical) ||
            (new->ldev_offline > 0 &&
             new->ldev_offline != old.ldev_offline) ||
            (new->ldev_count != old.ldev_count)) {
                shost_printk(KERN_INFO, cb->host,
                             "Logical drive count changed (%d/%d/%d)\n",
                             new->ldev_critical,
                             new->ldev_offline,
                             new->ldev_count);
                cb->need_ldev_info = true;
        }
        /* Dead drives or secondary interval elapsed: poll BGI status. */
        if (new->pdev_dead > 0 ||
            new->pdev_dead != old.pdev_dead ||
            time_after_eq(jiffies, cb->secondary_monitor_time
                          + MYRB_SECONDARY_MONITOR_INTERVAL)) {
                cb->need_bgi_status = cb->bgi_status_supported;
                cb->secondary_monitor_time = jiffies;
        }
        /* A rebuild is (or was) running: schedule a progress update. */
        if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
            new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
            old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
            old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
                cb->need_rbld = true;
                cb->rbld_first = (new->ldev_critical < old.ldev_critical);
        }
        /* Report the outcome of a consistency check that was running. */
        if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
                switch (new->rbld) {
                case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
                        shost_printk(KERN_INFO, cb->host,
                                     "Consistency Check Completed Successfully\n");
                        break;
                case MYRB_STDBY_RBLD_IN_PROGRESS:
                case MYRB_BG_RBLD_IN_PROGRESS:
                        break;
                case MYRB_BG_CHECK_IN_PROGRESS:
                        cb->need_cc_status = true;
                        break;
                case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
                        shost_printk(KERN_INFO, cb->host,
                                     "Consistency Check Completed with Error\n");
                        break;
                case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
                        shost_printk(KERN_INFO, cb->host,
                                     "Consistency Check Failed - Physical Device Failed\n");
                        break;
                case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
                        shost_printk(KERN_INFO, cb->host,
                                     "Consistency Check Failed - Logical Drive Failed\n");
                        break;
                case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
                        shost_printk(KERN_INFO, cb->host,
                                     "Consistency Check Failed - Other Causes\n");
                        break;
                case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
                        shost_printk(KERN_INFO, cb->host,
                                     "Consistency Check Successfully Terminated\n");
                        break;
                }
        else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
                cb->need_cc_status = true;

        return MYRB_STATUS_SUCCESS;
}
775
776 /**
777  * myrb_set_pdev_state - sets the device state for a physical device
778  *
779  * Return: command status
780  */
781 static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
782                 struct scsi_device *sdev, enum myrb_devstate state)
783 {
784         struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
785         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
786         unsigned short status;
787
788         mutex_lock(&cb->dcmd_mutex);
789         mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
790         mbox->type3D.id = MYRB_DCMD_TAG;
791         mbox->type3D.channel = sdev->channel;
792         mbox->type3D.target = sdev->id;
793         mbox->type3D.state = state & 0x1F;
794         status = myrb_exec_cmd(cb, cmd_blk);
795         mutex_unlock(&cb->dcmd_mutex);
796
797         return status;
798 }
799
800 /**
801  * myrb_enable_mmio - enables the Memory Mailbox Interface
802  *
803  * PD and P controller types have no memory mailbox, but still need the
804  * other dma mapped memory.
805  *
806  * Return: true on success, false otherwise.
807  */
808 static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
809 {
810         void __iomem *base = cb->io_base;
811         struct pci_dev *pdev = cb->pdev;
812         size_t err_table_size;
813         size_t ldev_info_size;
814         union myrb_cmd_mbox *cmd_mbox_mem;
815         struct myrb_stat_mbox *stat_mbox_mem;
816         union myrb_cmd_mbox mbox;
817         unsigned short status;
818
819         memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
820
821         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
822                 dev_err(&pdev->dev, "DMA mask out of range\n");
823                 return false;
824         }
825
826         cb->enquiry = dma_alloc_coherent(&pdev->dev,
827                                          sizeof(struct myrb_enquiry),
828                                          &cb->enquiry_addr, GFP_KERNEL);
829         if (!cb->enquiry)
830                 return false;
831
832         err_table_size = sizeof(struct myrb_error_entry) *
833                 MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
834         cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
835                                            &cb->err_table_addr, GFP_KERNEL);
836         if (!cb->err_table)
837                 return false;
838
839         ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
840         cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
841                                                &cb->ldev_info_addr, GFP_KERNEL);
842         if (!cb->ldev_info_buf)
843                 return false;
844
845         /*
846          * Skip mailbox initialisation for PD and P Controllers
847          */
848         if (!mmio_init_fn)
849                 return true;
850
851         /* These are the base addresses for the command memory mailbox array */
852         cb->cmd_mbox_size =  MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
853         cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
854                                                 cb->cmd_mbox_size,
855                                                 &cb->cmd_mbox_addr,
856                                                 GFP_KERNEL);
857         if (!cb->first_cmd_mbox)
858                 return false;
859
860         cmd_mbox_mem = cb->first_cmd_mbox;
861         cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
862         cb->last_cmd_mbox = cmd_mbox_mem;
863         cb->next_cmd_mbox = cb->first_cmd_mbox;
864         cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
865         cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
866
867         /* These are the base addresses for the status memory mailbox array */
868         cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
869             sizeof(struct myrb_stat_mbox);
870         cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
871                                                  cb->stat_mbox_size,
872                                                  &cb->stat_mbox_addr,
873                                                  GFP_KERNEL);
874         if (!cb->first_stat_mbox)
875                 return false;
876
877         stat_mbox_mem = cb->first_stat_mbox;
878         stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
879         cb->last_stat_mbox = stat_mbox_mem;
880         cb->next_stat_mbox = cb->first_stat_mbox;
881
882         /* Enable the Memory Mailbox Interface. */
883         cb->dual_mode_interface = true;
884         mbox.typeX.opcode = 0x2B;
885         mbox.typeX.id = 0;
886         mbox.typeX.opcode2 = 0x14;
887         mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
888         mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
889
890         status = mmio_init_fn(pdev, base, &mbox);
891         if (status != MYRB_STATUS_SUCCESS) {
892                 cb->dual_mode_interface = false;
893                 mbox.typeX.opcode2 = 0x10;
894                 status = mmio_init_fn(pdev, base, &mbox);
895                 if (status != MYRB_STATUS_SUCCESS) {
896                         dev_err(&pdev->dev,
897                                 "Failed to enable mailbox, statux %02X\n",
898                                 status);
899                         return false;
900                 }
901         }
902         return true;
903 }
904
905 /**
906  * myrb_get_hba_config - reads the configuration information
907  *
908  * Reads the configuration information from the controller and
909  * initializes the controller structure.
910  *
911  * Return: 0 on success, errno otherwise
912  */
913 static int myrb_get_hba_config(struct myrb_hba *cb)
914 {
915         struct myrb_enquiry2 *enquiry2;
916         dma_addr_t enquiry2_addr;
917         struct myrb_config2 *config2;
918         dma_addr_t config2_addr;
919         struct Scsi_Host *shost = cb->host;
920         struct pci_dev *pdev = cb->pdev;
921         int pchan_max = 0, pchan_cur = 0;
922         unsigned short status;
923         int ret = -ENODEV, memsize = 0;
924
925         enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
926                                       &enquiry2_addr, GFP_KERNEL);
927         if (!enquiry2) {
928                 shost_printk(KERN_ERR, cb->host,
929                              "Failed to allocate V1 enquiry2 memory\n");
930                 return -ENOMEM;
931         }
932         config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
933                                      &config2_addr, GFP_KERNEL);
934         if (!config2) {
935                 shost_printk(KERN_ERR, cb->host,
936                              "Failed to allocate V1 config2 memory\n");
937                 dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
938                                   enquiry2, enquiry2_addr);
939                 return -ENOMEM;
940         }
941         mutex_lock(&cb->dma_mutex);
942         status = myrb_hba_enquiry(cb);
943         mutex_unlock(&cb->dma_mutex);
944         if (status != MYRB_STATUS_SUCCESS) {
945                 shost_printk(KERN_WARNING, cb->host,
946                              "Failed it issue V1 Enquiry\n");
947                 goto out_free;
948         }
949
950         status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
951         if (status != MYRB_STATUS_SUCCESS) {
952                 shost_printk(KERN_WARNING, cb->host,
953                              "Failed to issue V1 Enquiry2\n");
954                 goto out_free;
955         }
956
957         status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
958         if (status != MYRB_STATUS_SUCCESS) {
959                 shost_printk(KERN_WARNING, cb->host,
960                              "Failed to issue ReadConfig2\n");
961                 goto out_free;
962         }
963
964         status = myrb_get_ldev_info(cb);
965         if (status != MYRB_STATUS_SUCCESS) {
966                 shost_printk(KERN_WARNING, cb->host,
967                              "Failed to get logical drive information\n");
968                 goto out_free;
969         }
970
971         /*
972          * Initialize the Controller Model Name and Full Model Name fields.
973          */
974         switch (enquiry2->hw.sub_model) {
975         case DAC960_V1_P_PD_PU:
976                 if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
977                         strcpy(cb->model_name, "DAC960PU");
978                 else
979                         strcpy(cb->model_name, "DAC960PD");
980                 break;
981         case DAC960_V1_PL:
982                 strcpy(cb->model_name, "DAC960PL");
983                 break;
984         case DAC960_V1_PG:
985                 strcpy(cb->model_name, "DAC960PG");
986                 break;
987         case DAC960_V1_PJ:
988                 strcpy(cb->model_name, "DAC960PJ");
989                 break;
990         case DAC960_V1_PR:
991                 strcpy(cb->model_name, "DAC960PR");
992                 break;
993         case DAC960_V1_PT:
994                 strcpy(cb->model_name, "DAC960PT");
995                 break;
996         case DAC960_V1_PTL0:
997                 strcpy(cb->model_name, "DAC960PTL0");
998                 break;
999         case DAC960_V1_PRL:
1000                 strcpy(cb->model_name, "DAC960PRL");
1001                 break;
1002         case DAC960_V1_PTL1:
1003                 strcpy(cb->model_name, "DAC960PTL1");
1004                 break;
1005         case DAC960_V1_1164P:
1006                 strcpy(cb->model_name, "eXtremeRAID 1100");
1007                 break;
1008         default:
1009                 shost_printk(KERN_WARNING, cb->host,
1010                              "Unknown Model %X\n",
1011                              enquiry2->hw.sub_model);
1012                 goto out;
1013         }
1014         /*
1015          * Initialize the Controller Firmware Version field and verify that it
1016          * is a supported firmware version.
1017          * The supported firmware versions are:
1018          *
1019          * DAC1164P                 5.06 and above
1020          * DAC960PTL/PRL/PJ/PG      4.06 and above
1021          * DAC960PU/PD/PL           3.51 and above
1022          * DAC960PU/PD/PL/P         2.73 and above
1023          */
1024 #if defined(CONFIG_ALPHA)
1025         /*
1026          * DEC Alpha machines were often equipped with DAC960 cards that were
1027          * OEMed from Mylex, and had their own custom firmware. Version 2.70,
1028          * the last custom FW revision to be released by DEC for these older
1029          * controllers, appears to work quite well with this driver.
1030          *
1031          * Cards tested successfully were several versions each of the PD and
1032          * PU, called by DEC the KZPSC and KZPAC, respectively, and having
1033          * the Manufacturer Numbers (from Mylex), usually on a sticker on the
1034          * back of the board, of:
1035          *
1036          * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
1037          *         or D040349 (3-channel)
1038          * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
1039          *         or D040397 (3-channel)
1040          */
1041 # define FIRMWARE_27X   "2.70"
1042 #else
1043 # define FIRMWARE_27X   "2.73"
1044 #endif
1045
1046         if (enquiry2->fw.major_version == 0) {
1047                 enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1048                 enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1049                 enquiry2->fw.firmware_type = '0';
1050                 enquiry2->fw.turn_id = 0;
1051         }
1052         snprintf(cb->fw_version, sizeof(cb->fw_version),
1053                 "%d.%02d-%c-%02d",
1054                 enquiry2->fw.major_version,
1055                 enquiry2->fw.minor_version,
1056                 enquiry2->fw.firmware_type,
1057                 enquiry2->fw.turn_id);
1058         if (!((enquiry2->fw.major_version == 5 &&
1059                enquiry2->fw.minor_version >= 6) ||
1060               (enquiry2->fw.major_version == 4 &&
1061                enquiry2->fw.minor_version >= 6) ||
1062               (enquiry2->fw.major_version == 3 &&
1063                enquiry2->fw.minor_version >= 51) ||
1064               (enquiry2->fw.major_version == 2 &&
1065                strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1066                 shost_printk(KERN_WARNING, cb->host,
1067                         "Firmware Version '%s' unsupported\n",
1068                         cb->fw_version);
1069                 goto out;
1070         }
1071         /*
1072          * Initialize the Channels, Targets, Memory Size, and SAF-TE
1073          * Enclosure Management Enabled fields.
1074          */
1075         switch (enquiry2->hw.model) {
1076         case MYRB_5_CHANNEL_BOARD:
1077                 pchan_max = 5;
1078                 break;
1079         case MYRB_3_CHANNEL_BOARD:
1080         case MYRB_3_CHANNEL_ASIC_DAC:
1081                 pchan_max = 3;
1082                 break;
1083         case MYRB_2_CHANNEL_BOARD:
1084                 pchan_max = 2;
1085                 break;
1086         default:
1087                 pchan_max = enquiry2->cfg_chan;
1088                 break;
1089         }
1090         pchan_cur = enquiry2->cur_chan;
1091         if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
1092                 cb->bus_width = 32;
1093         else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
1094                 cb->bus_width = 16;
1095         else
1096                 cb->bus_width = 8;
1097         cb->ldev_block_size = enquiry2->ldev_block_size;
1098         shost->max_channel = pchan_cur;
1099         shost->max_id = enquiry2->max_targets;
1100         memsize = enquiry2->mem_size >> 20;
1101         cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1102         /*
1103          * Initialize the Controller Queue Depth, Driver Queue Depth,
1104          * Logical Drive Count, Maximum Blocks per Command, Controller
1105          * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
1106          * The Driver Queue Depth must be at most one less than the
1107          * Controller Queue Depth to allow for an automatic drive
1108          * rebuild operation.
1109          */
1110         shost->can_queue = cb->enquiry->max_tcq;
1111         if (shost->can_queue < 3)
1112                 shost->can_queue = enquiry2->max_cmds;
1113         if (shost->can_queue < 3)
1114                 /* Play safe and disable TCQ */
1115                 shost->can_queue = 1;
1116
1117         if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
1118                 shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
1119         shost->max_sectors = enquiry2->max_sectors;
1120         shost->sg_tablesize = enquiry2->max_sge;
1121         if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
1122                 shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
1123         /*
1124          * Initialize the Stripe Size, Segment Size, and Geometry Translation.
1125          */
1126         cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1127                 >> (10 - MYRB_BLKSIZE_BITS);
1128         cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1129                 >> (10 - MYRB_BLKSIZE_BITS);
1130         /* Assume 255/63 translation */
1131         cb->ldev_geom_heads = 255;
1132         cb->ldev_geom_sectors = 63;
1133         if (config2->drive_geometry) {
1134                 cb->ldev_geom_heads = 128;
1135                 cb->ldev_geom_sectors = 32;
1136         }
1137
1138         /*
1139          * Initialize the Background Initialization Status.
1140          */
1141         if ((cb->fw_version[0] == '4' &&
1142              strcmp(cb->fw_version, "4.08") >= 0) ||
1143             (cb->fw_version[0] == '5' &&
1144              strcmp(cb->fw_version, "5.08") >= 0)) {
1145                 cb->bgi_status_supported = true;
1146                 myrb_bgi_control(cb);
1147         }
1148         cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1149         ret = 0;
1150
1151 out:
1152         shost_printk(KERN_INFO, cb->host,
1153                 "Configuring %s PCI RAID Controller\n", cb->model_name);
1154         shost_printk(KERN_INFO, cb->host,
1155                 "  Firmware Version: %s, Memory Size: %dMB\n",
1156                 cb->fw_version, memsize);
1157         if (cb->io_addr == 0)
1158                 shost_printk(KERN_INFO, cb->host,
1159                         "  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
1160                         (unsigned long)cb->pci_addr, cb->irq);
1161         else
1162                 shost_printk(KERN_INFO, cb->host,
1163                         "  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
1164                         (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1165                         cb->irq);
1166         shost_printk(KERN_INFO, cb->host,
1167                 "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
1168                 cb->host->can_queue, cb->host->max_sectors);
1169         shost_printk(KERN_INFO, cb->host,
1170                      "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
1171                      cb->host->can_queue, cb->host->sg_tablesize,
1172                      MYRB_SCATTER_GATHER_LIMIT);
1173         shost_printk(KERN_INFO, cb->host,
1174                      "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
1175                      cb->stripe_size, cb->segment_size,
1176                      cb->ldev_geom_heads, cb->ldev_geom_sectors,
1177                      cb->safte_enabled ?
1178                      "  SAF-TE Enclosure Management Enabled" : "");
1179         shost_printk(KERN_INFO, cb->host,
1180                      "  Physical: %d/%d channels %d/%d/%d devices\n",
1181                      pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1182                      cb->host->max_id);
1183
1184         shost_printk(KERN_INFO, cb->host,
1185                      "  Logical: 1/1 channels, %d/%d disks\n",
1186                      cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
1187
1188 out_free:
1189         dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
1190                           enquiry2, enquiry2_addr);
1191         dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
1192                           config2, config2_addr);
1193
1194         return ret;
1195 }
1196
1197 /**
1198  * myrb_unmap - unmaps controller structures
1199  */
1200 static void myrb_unmap(struct myrb_hba *cb)
1201 {
1202         if (cb->ldev_info_buf) {
1203                 size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
1204                         MYRB_MAX_LDEVS;
1205                 dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1206                                   cb->ldev_info_buf, cb->ldev_info_addr);
1207                 cb->ldev_info_buf = NULL;
1208         }
1209         if (cb->err_table) {
1210                 size_t err_table_size = sizeof(struct myrb_error_entry) *
1211                         MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
1212                 dma_free_coherent(&cb->pdev->dev, err_table_size,
1213                                   cb->err_table, cb->err_table_addr);
1214                 cb->err_table = NULL;
1215         }
1216         if (cb->enquiry) {
1217                 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1218                                   cb->enquiry, cb->enquiry_addr);
1219                 cb->enquiry = NULL;
1220         }
1221         if (cb->first_stat_mbox) {
1222                 dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1223                                   cb->first_stat_mbox, cb->stat_mbox_addr);
1224                 cb->first_stat_mbox = NULL;
1225         }
1226         if (cb->first_cmd_mbox) {
1227                 dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1228                                   cb->first_cmd_mbox, cb->cmd_mbox_addr);
1229                 cb->first_cmd_mbox = NULL;
1230         }
1231 }
1232
1233 /**
1234  * myrb_cleanup - cleanup controller structures
1235  */
1236 static void myrb_cleanup(struct myrb_hba *cb)
1237 {
1238         struct pci_dev *pdev = cb->pdev;
1239
1240         /* Free the memory mailbox, status, and related structures */
1241         myrb_unmap(cb);
1242
1243         if (cb->mmio_base) {
1244                 if (cb->disable_intr)
1245                         cb->disable_intr(cb->io_base);
1246                 iounmap(cb->mmio_base);
1247         }
1248         if (cb->irq)
1249                 free_irq(cb->irq, cb);
1250         if (cb->io_addr)
1251                 release_region(cb->io_addr, 0x80);
1252         pci_set_drvdata(pdev, NULL);
1253         pci_disable_device(pdev);
1254         scsi_host_put(cb->host);
1255 }
1256
1257 static int myrb_host_reset(struct scsi_cmnd *scmd)
1258 {
1259         struct Scsi_Host *shost = scmd->device->host;
1260         struct myrb_hba *cb = shost_priv(shost);
1261
1262         cb->reset(cb->io_base);
1263         return SUCCESS;
1264 }
1265
/*
 * myrb_pthru_queuecommand - queue a SCSI pass-through command
 *
 * Builds a Direct CDB (DCDB) describing @scmd and submits it to the
 * controller via a MYRB_CMD_DCDB mailbox command.  The DCDB holds a
 * single DMA address/length pair, so requests that map to more than
 * one scatter/gather segment are failed with DID_ERROR.
 */
static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
        struct myrb_hba *cb = shost_priv(shost);
        struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
        struct myrb_dcdb *dcdb;
        dma_addr_t dcdb_addr;
        struct scsi_device *sdev = scmd->device;
        struct scatterlist *sgl;
        unsigned long flags;
        int nsge;

        myrb_reset_cmd(cmd_blk);
        dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
        if (!dcdb)
                return SCSI_MLQUEUE_HOST_BUSY;
        nsge = scsi_dma_map(scmd);
        /*
         * The DCDB can only describe one segment.
         * NOTE(review): a negative scsi_dma_map() return (mapping failure)
         * is not handled here - confirm whether that can occur on this path.
         */
        if (nsge > 1) {
                dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
                scmd->result = (DID_ERROR << 16);
                scmd->scsi_done(scmd);
                return 0;
        }

        mbox->type3.opcode = MYRB_CMD_DCDB;
        /* ids 0-2 are reserved for driver-internal commands */
        mbox->type3.id = scmd->request->tag + 3;
        mbox->type3.addr = dcdb_addr;
        dcdb->channel = sdev->channel;
        dcdb->target = sdev->id;
        switch (scmd->sc_data_direction) {
        case DMA_NONE:
                dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
                break;
        case DMA_TO_DEVICE:
                dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
                break;
        case DMA_FROM_DEVICE:
                dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
                break;
        default:
                dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
                break;
        }
        dcdb->early_status = false;
        /* Map the block-layer timeout (seconds) onto the DCDB's coarse steps */
        if (scmd->request->timeout <= 10)
                dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
        else if (scmd->request->timeout <= 60)
                dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
        else if (scmd->request->timeout <= 600)
                dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
        else
                dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
        dcdb->no_autosense = false;
        dcdb->allow_disconnect = true;
        sgl = scsi_sglist(scmd);
        dcdb->dma_addr = sg_dma_address(sgl);
        /* Transfer length is split into a 16-bit low part and 4 high bits */
        if (sg_dma_len(sgl) > USHRT_MAX) {
                dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
                dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
        } else {
                dcdb->xfer_len_lo = sg_dma_len(sgl);
                dcdb->xfer_len_hi4 = 0;
        }
        dcdb->cdb_len = scmd->cmd_len;
        dcdb->sense_len = sizeof(dcdb->sense);
        memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);

        /* Hand the command block to the controller under the queue lock */
        spin_lock_irqsave(&cb->queue_lock, flags);
        cb->qcmd(cb, cmd_blk);
        spin_unlock_irqrestore(&cb->queue_lock, flags);
        return 0;
}
1339
1340 static void myrb_inquiry(struct myrb_hba *cb,
1341                 struct scsi_cmnd *scmd)
1342 {
1343         unsigned char inq[36] = {
1344                 0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
1345                 0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
1346                 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1347                 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1348                 0x20, 0x20, 0x20, 0x20,
1349         };
1350
1351         if (cb->bus_width > 16)
1352                 inq[7] |= 1 << 6;
1353         if (cb->bus_width > 8)
1354                 inq[7] |= 1 << 5;
1355         memcpy(&inq[16], cb->model_name, 16);
1356         memcpy(&inq[32], cb->fw_version, 1);
1357         memcpy(&inq[33], &cb->fw_version[2], 2);
1358         memcpy(&inq[35], &cb->fw_version[7], 1);
1359
1360         scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
1361 }
1362
/*
 * myrb_mode_sense - synthesize a MODE SENSE(6) reply for a logical drive
 *
 * Builds a mode parameter header, an optional 8-byte block descriptor
 * (omitted when the DBD bit is set in the CDB), and a mode page 0x08
 * (caching), then copies the result into the command's data buffer.
 */
static void
myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
        unsigned char modes[32], *mode_pg;
        bool dbd;
        size_t mode_len;

        /* DBD (disable block descriptors) bit of the MODE SENSE CDB */
        dbd = (scmd->cmnd[1] & 0x08) == 0x08;
        if (dbd) {
                mode_len = 24;
                mode_pg = &modes[4];
        } else {
                mode_len = 32;
                mode_pg = &modes[12];
        }
        memset(modes, 0, sizeof(modes));
        modes[0] = mode_len - 1;	/* mode data length, excluding itself */
        if (!dbd) {
                unsigned char *block_desc = &modes[4];

                modes[3] = 8;	/* block descriptor length */
                put_unaligned_be32(ldev_info->size, &block_desc[0]);
                /*
                 * NOTE(review): this 32-bit store starts at block_desc[5],
                 * but the short block descriptor's block-length field only
                 * occupies bytes 5-7; the low-order byte spills into
                 * modes[12], which is overwritten by mode_pg[0] below.
                 * Verify against the SPC block descriptor layout -
                 * &block_desc[4] may have been intended.
                 */
                put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
        }
        mode_pg[0] = 0x08;	/* page code: caching */
        mode_pg[1] = 0x12;	/* page length */
        if (ldev_info->wb_enabled)
                mode_pg[2] |= 0x04;	/* WCE: write cache enabled */
        if (cb->segment_size) {
                mode_pg[2] |= 0x08;
                put_unaligned_be16(cb->segment_size, &mode_pg[14]);
        }

        scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}
1399
1400 static void myrb_request_sense(struct myrb_hba *cb,
1401                 struct scsi_cmnd *scmd)
1402 {
1403         scsi_build_sense_buffer(0, scmd->sense_buffer,
1404                                 NO_SENSE, 0, 0);
1405         scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
1406                                  SCSI_SENSE_BUFFERSIZE);
1407 }
1408
1409 static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1410                 struct myrb_ldev_info *ldev_info)
1411 {
1412         unsigned char data[8];
1413
1414         dev_dbg(&scmd->device->sdev_gendev,
1415                 "Capacity %u, blocksize %u\n",
1416                 ldev_info->size, cb->ldev_block_size);
1417         put_unaligned_be32(ldev_info->size - 1, &data[0]);
1418         put_unaligned_be32(cb->ldev_block_size, &data[4]);
1419         scsi_sg_copy_from_buffer(scmd, data, 8);
1420 }
1421
1422 static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1423                 struct scsi_cmnd *scmd)
1424 {
1425         struct myrb_hba *cb = shost_priv(shost);
1426         struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1427         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1428         struct myrb_ldev_info *ldev_info;
1429         struct scsi_device *sdev = scmd->device;
1430         struct scatterlist *sgl;
1431         unsigned long flags;
1432         u64 lba;
1433         u32 block_cnt;
1434         int nsge;
1435
1436         ldev_info = sdev->hostdata;
1437         if (ldev_info->state != MYRB_DEVICE_ONLINE &&
1438             ldev_info->state != MYRB_DEVICE_WO) {
1439                 dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
1440                         sdev->id, ldev_info ? ldev_info->state : 0xff);
1441                 scmd->result = (DID_BAD_TARGET << 16);
1442                 scmd->scsi_done(scmd);
1443                 return 0;
1444         }
1445         switch (scmd->cmnd[0]) {
1446         case TEST_UNIT_READY:
1447                 scmd->result = (DID_OK << 16);
1448                 scmd->scsi_done(scmd);
1449                 return 0;
1450         case INQUIRY:
1451                 if (scmd->cmnd[1] & 1) {
1452                         /* Illegal request, invalid field in CDB */
1453                         scsi_build_sense_buffer(0, scmd->sense_buffer,
1454                                                 ILLEGAL_REQUEST, 0x24, 0);
1455                         scmd->result = (DRIVER_SENSE << 24) |
1456                                 SAM_STAT_CHECK_CONDITION;
1457                 } else {
1458                         myrb_inquiry(cb, scmd);
1459                         scmd->result = (DID_OK << 16);
1460                 }
1461                 scmd->scsi_done(scmd);
1462                 return 0;
1463         case SYNCHRONIZE_CACHE:
1464                 scmd->result = (DID_OK << 16);
1465                 scmd->scsi_done(scmd);
1466                 return 0;
1467         case MODE_SENSE:
1468                 if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1469                     (scmd->cmnd[2] & 0x3F) != 0x08) {
1470                         /* Illegal request, invalid field in CDB */
1471                         scsi_build_sense_buffer(0, scmd->sense_buffer,
1472                                                 ILLEGAL_REQUEST, 0x24, 0);
1473                         scmd->result = (DRIVER_SENSE << 24) |
1474                                 SAM_STAT_CHECK_CONDITION;
1475                 } else {
1476                         myrb_mode_sense(cb, scmd, ldev_info);
1477                         scmd->result = (DID_OK << 16);
1478                 }
1479                 scmd->scsi_done(scmd);
1480                 return 0;
1481         case READ_CAPACITY:
1482                 if ((scmd->cmnd[1] & 1) ||
1483                     (scmd->cmnd[8] & 1)) {
1484                         /* Illegal request, invalid field in CDB */
1485                         scsi_build_sense_buffer(0, scmd->sense_buffer,
1486                                                 ILLEGAL_REQUEST, 0x24, 0);
1487                         scmd->result = (DRIVER_SENSE << 24) |
1488                                 SAM_STAT_CHECK_CONDITION;
1489                         scmd->scsi_done(scmd);
1490                         return 0;
1491                 }
1492                 lba = get_unaligned_be32(&scmd->cmnd[2]);
1493                 if (lba) {
1494                         /* Illegal request, invalid field in CDB */
1495                         scsi_build_sense_buffer(0, scmd->sense_buffer,
1496                                                 ILLEGAL_REQUEST, 0x24, 0);
1497                         scmd->result = (DRIVER_SENSE << 24) |
1498                                 SAM_STAT_CHECK_CONDITION;
1499                         scmd->scsi_done(scmd);
1500                         return 0;
1501                 }
1502                 myrb_read_capacity(cb, scmd, ldev_info);
1503                 scmd->scsi_done(scmd);
1504                 return 0;
1505         case REQUEST_SENSE:
1506                 myrb_request_sense(cb, scmd);
1507                 scmd->result = (DID_OK << 16);
1508                 return 0;
1509         case SEND_DIAGNOSTIC:
1510                 if (scmd->cmnd[1] != 0x04) {
1511                         /* Illegal request, invalid field in CDB */
1512                         scsi_build_sense_buffer(0, scmd->sense_buffer,
1513                                                 ILLEGAL_REQUEST, 0x24, 0);
1514                         scmd->result = (DRIVER_SENSE << 24) |
1515                                 SAM_STAT_CHECK_CONDITION;
1516                 } else {
1517                         /* Assume good status */
1518                         scmd->result = (DID_OK << 16);
1519                 }
1520                 scmd->scsi_done(scmd);
1521                 return 0;
1522         case READ_6:
1523                 if (ldev_info->state == MYRB_DEVICE_WO) {
1524                         /* Data protect, attempt to read invalid data */
1525                         scsi_build_sense_buffer(0, scmd->sense_buffer,
1526                                                 DATA_PROTECT, 0x21, 0x06);
1527                         scmd->result = (DRIVER_SENSE << 24) |
1528                                 SAM_STAT_CHECK_CONDITION;
1529                         scmd->scsi_done(scmd);
1530                         return 0;
1531                 }
1532                 /* fall through */
1533         case WRITE_6:
1534                 lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1535                        (scmd->cmnd[2] << 8) |
1536                        scmd->cmnd[3]);
1537                 block_cnt = scmd->cmnd[4];
1538                 break;
1539         case READ_10:
1540                 if (ldev_info->state == MYRB_DEVICE_WO) {
1541                         /* Data protect, attempt to read invalid data */
1542                         scsi_build_sense_buffer(0, scmd->sense_buffer,
1543                                                 DATA_PROTECT, 0x21, 0x06);
1544                         scmd->result = (DRIVER_SENSE << 24) |
1545                                 SAM_STAT_CHECK_CONDITION;
1546                         scmd->scsi_done(scmd);
1547                         return 0;
1548                 }
1549                 /* fall through */
1550         case WRITE_10:
1551         case VERIFY:            /* 0x2F */
1552         case WRITE_VERIFY:      /* 0x2E */
1553                 lba = get_unaligned_be32(&scmd->cmnd[2]);
1554                 block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
1555                 break;
1556         case READ_12:
1557                 if (ldev_info->state == MYRB_DEVICE_WO) {
1558                         /* Data protect, attempt to read invalid data */
1559                         scsi_build_sense_buffer(0, scmd->sense_buffer,
1560                                                 DATA_PROTECT, 0x21, 0x06);
1561                         scmd->result = (DRIVER_SENSE << 24) |
1562                                 SAM_STAT_CHECK_CONDITION;
1563                         scmd->scsi_done(scmd);
1564                         return 0;
1565                 }
1566                 /* fall through */
1567         case WRITE_12:
1568         case VERIFY_12: /* 0xAF */
1569         case WRITE_VERIFY_12:   /* 0xAE */
1570                 lba = get_unaligned_be32(&scmd->cmnd[2]);
1571                 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1572                 break;
1573         default:
1574                 /* Illegal request, invalid opcode */
1575                 scsi_build_sense_buffer(0, scmd->sense_buffer,
1576                                         ILLEGAL_REQUEST, 0x20, 0);
1577                 scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1578                 scmd->scsi_done(scmd);
1579                 return 0;
1580         }
1581
1582         myrb_reset_cmd(cmd_blk);
1583         mbox->type5.id = scmd->request->tag + 3;
1584         if (scmd->sc_data_direction == DMA_NONE)
1585                 goto submit;
1586         nsge = scsi_dma_map(scmd);
1587         if (nsge == 1) {
1588                 sgl = scsi_sglist(scmd);
1589                 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1590                         mbox->type5.opcode = MYRB_CMD_READ;
1591                 else
1592                         mbox->type5.opcode = MYRB_CMD_WRITE;
1593
1594                 mbox->type5.ld.xfer_len = block_cnt;
1595                 mbox->type5.ld.ldev_num = sdev->id;
1596                 mbox->type5.lba = lba;
1597                 mbox->type5.addr = (u32)sg_dma_address(sgl);
1598         } else {
1599                 struct myrb_sge *hw_sgl;
1600                 dma_addr_t hw_sgl_addr;
1601                 int i;
1602
1603                 hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1604                 if (!hw_sgl)
1605                         return SCSI_MLQUEUE_HOST_BUSY;
1606
1607                 cmd_blk->sgl = hw_sgl;
1608                 cmd_blk->sgl_addr = hw_sgl_addr;
1609
1610                 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1611                         mbox->type5.opcode = MYRB_CMD_READ_SG;
1612                 else
1613                         mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1614
1615                 mbox->type5.ld.xfer_len = block_cnt;
1616                 mbox->type5.ld.ldev_num = sdev->id;
1617                 mbox->type5.lba = lba;
1618                 mbox->type5.addr = hw_sgl_addr;
1619                 mbox->type5.sg_count = nsge;
1620
1621                 scsi_for_each_sg(scmd, sgl, nsge, i) {
1622                         hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1623                         hw_sgl->sge_count = (u32)sg_dma_len(sgl);
1624                         hw_sgl++;
1625                 }
1626         }
1627 submit:
1628         spin_lock_irqsave(&cb->queue_lock, flags);
1629         cb->qcmd(cb, cmd_blk);
1630         spin_unlock_irqrestore(&cb->queue_lock, flags);
1631
1632         return 0;
1633 }
1634
1635 static int myrb_queuecommand(struct Scsi_Host *shost,
1636                 struct scsi_cmnd *scmd)
1637 {
1638         struct scsi_device *sdev = scmd->device;
1639
1640         if (sdev->channel > myrb_logical_channel(shost)) {
1641                 scmd->result = (DID_BAD_TARGET << 16);
1642                 scmd->scsi_done(scmd);
1643                 return 0;
1644         }
1645         if (sdev->channel == myrb_logical_channel(shost))
1646                 return myrb_ldev_queuecommand(shost, scmd);
1647
1648         return myrb_pthru_queuecommand(shost, scmd);
1649 }
1650
1651 static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1652 {
1653         struct myrb_hba *cb = shost_priv(sdev->host);
1654         struct myrb_ldev_info *ldev_info;
1655         unsigned short ldev_num = sdev->id;
1656         enum raid_level level;
1657
1658         ldev_info = cb->ldev_info_buf + ldev_num;
1659         if (!ldev_info)
1660                 return -ENXIO;
1661
1662         sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1663         if (!sdev->hostdata)
1664                 return -ENOMEM;
1665         dev_dbg(&sdev->sdev_gendev,
1666                 "slave alloc ldev %d state %x\n",
1667                 ldev_num, ldev_info->state);
1668         memcpy(sdev->hostdata, ldev_info,
1669                sizeof(*ldev_info));
1670         switch (ldev_info->raid_level) {
1671         case MYRB_RAID_LEVEL0:
1672                 level = RAID_LEVEL_LINEAR;
1673                 break;
1674         case MYRB_RAID_LEVEL1:
1675                 level = RAID_LEVEL_1;
1676                 break;
1677         case MYRB_RAID_LEVEL3:
1678                 level = RAID_LEVEL_3;
1679                 break;
1680         case MYRB_RAID_LEVEL5:
1681                 level = RAID_LEVEL_5;
1682                 break;
1683         case MYRB_RAID_LEVEL6:
1684                 level = RAID_LEVEL_6;
1685                 break;
1686         case MYRB_RAID_JBOD:
1687                 level = RAID_LEVEL_JBOD;
1688                 break;
1689         default:
1690                 level = RAID_LEVEL_UNKNOWN;
1691                 break;
1692         }
1693         raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
1694         return 0;
1695 }
1696
1697 static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1698 {
1699         struct myrb_hba *cb = shost_priv(sdev->host);
1700         struct myrb_pdev_state *pdev_info;
1701         unsigned short status;
1702
1703         if (sdev->id > MYRB_MAX_TARGETS)
1704                 return -ENXIO;
1705
1706         pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1707         if (!pdev_info)
1708                 return -ENOMEM;
1709
1710         status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1711                                   sdev, pdev_info);
1712         if (status != MYRB_STATUS_SUCCESS) {
1713                 dev_dbg(&sdev->sdev_gendev,
1714                         "Failed to get device state, status %x\n",
1715                         status);
1716                 kfree(pdev_info);
1717                 return -ENXIO;
1718         }
1719         if (!pdev_info->present) {
1720                 dev_dbg(&sdev->sdev_gendev,
1721                         "device not present, skip\n");
1722                 kfree(pdev_info);
1723                 return -ENXIO;
1724         }
1725         dev_dbg(&sdev->sdev_gendev,
1726                 "slave alloc pdev %d:%d state %x\n",
1727                 sdev->channel, sdev->id, pdev_info->state);
1728         sdev->hostdata = pdev_info;
1729
1730         return 0;
1731 }
1732
1733 static int myrb_slave_alloc(struct scsi_device *sdev)
1734 {
1735         if (sdev->channel > myrb_logical_channel(sdev->host))
1736                 return -ENXIO;
1737
1738         if (sdev->lun > 0)
1739                 return -ENXIO;
1740
1741         if (sdev->channel == myrb_logical_channel(sdev->host))
1742                 return myrb_ldev_slave_alloc(sdev);
1743
1744         return myrb_pdev_slave_alloc(sdev);
1745 }
1746
1747 static int myrb_slave_configure(struct scsi_device *sdev)
1748 {
1749         struct myrb_ldev_info *ldev_info;
1750
1751         if (sdev->channel > myrb_logical_channel(sdev->host))
1752                 return -ENXIO;
1753
1754         if (sdev->channel < myrb_logical_channel(sdev->host)) {
1755                 sdev->no_uld_attach = 1;
1756                 return 0;
1757         }
1758         if (sdev->lun != 0)
1759                 return -ENXIO;
1760
1761         ldev_info = sdev->hostdata;
1762         if (!ldev_info)
1763                 return -ENXIO;
1764         if (ldev_info->state != MYRB_DEVICE_ONLINE)
1765                 sdev_printk(KERN_INFO, sdev,
1766                             "Logical drive is %s\n",
1767                             myrb_devstate_name(ldev_info->state));
1768
1769         sdev->tagged_supported = 1;
1770         return 0;
1771 }
1772
/* Free the per-device state allocated in myrb_slave_alloc(). */
static void myrb_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
}
1777
/*
 * myrb_biosparam - report BIOS disk geometry for a logical drive
 *
 * Heads and sectors-per-track come from the controller-wide geometry;
 * cylinders are derived from the capacity. Note sector_div() divides
 * @capacity in place and returns the remainder, so geom[2] receives
 * capacity / (heads * sectors) via the macro's side effect.
 */
static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		sector_t capacity, int geom[])
{
	struct myrb_hba *cb = shost_priv(sdev->host);

	geom[0] = cb->ldev_geom_heads;
	geom[1] = cb->ldev_geom_sectors;
	geom[2] = sector_div(capacity, geom[0] * geom[1]);

	return 0;
}
1789
/*
 * raid_state_show - sysfs 'raid_state' attribute
 *
 * For logical drives the cached state from slave_alloc is reported;
 * for physical devices the state is re-read from the controller so
 * the attribute always reflects current hardware status.
 */
static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	int ret;

	/* No hostdata: slave_alloc has not run (or failed) for this device. */
	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel == myrb_logical_channel(sdev->host)) {
		struct myrb_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		name = myrb_devstate_name(ldev_info->state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       ldev_info->state);
	} else {
		struct myrb_pdev_state *pdev_info = sdev->hostdata;
		unsigned short status;
		const char *name;

		/* Refresh pdev_info in place from the controller. */
		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
					  sdev, pdev_info);
		if (status != MYRB_STATUS_SUCCESS)
			sdev_printk(KERN_INFO, sdev,
				    "Failed to get device state, status %x\n",
				    status);

		if (!pdev_info->present)
			name = "Removed";
		else
			name = myrb_devstate_name(pdev_info->state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       pdev_info->state);
	}
	return ret;
}
1834
/*
 * raid_state_store - sysfs 'raid_state' attribute (write)
 *
 * Accepts "kill"/"offline", "online" or "standby" and asks the
 * controller to move the physical device into the requested state.
 * Only meaningful for physical devices (hostdata must be a
 * myrb_pdev_state). Returns @count on success or a negative errno
 * mapped from the controller status.
 */
static ssize_t raid_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	enum myrb_devstate new_state;
	unsigned short status;

	/* strncmp: accept the keyword with or without a trailing newline. */
	if (!strncmp(buf, "kill", 4) ||
	    !strncmp(buf, "offline", 7))
		new_state = MYRB_DEVICE_DEAD;
	else if (!strncmp(buf, "online", 6))
		new_state = MYRB_DEVICE_ONLINE;
	else if (!strncmp(buf, "standby", 7))
		new_state = MYRB_DEVICE_STANDBY;
	else
		return -EINVAL;

	pdev_info = sdev->hostdata;
	if (!pdev_info) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - no physical device information\n");
		return -ENXIO;
	}
	if (!pdev_info->present) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - device not present\n");
		return -ENXIO;
	}

	/* Already in the requested state: nothing to do. */
	if (pdev_info->state == new_state)
		return count;

	status = myrb_set_pdev_state(cb, sdev, new_state);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		break;
	case MYRB_STATUS_START_DEVICE_FAILED:
		sdev_printk(KERN_INFO, sdev,
			     "Failed - Unable to Start Device\n");
		count = -EAGAIN;
		break;
	case MYRB_STATUS_NO_DEVICE:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - No Device at Address\n");
		count = -ENODEV;
		break;
	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
		sdev_printk(KERN_INFO, sdev,
			 "Failed - Invalid Channel or Target or Modifier\n");
		count = -EINVAL;
		break;
	case MYRB_STATUS_CHANNEL_BUSY:
		sdev_printk(KERN_INFO, sdev,
			 "Failed - Channel Busy\n");
		count = -EBUSY;
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			 "Failed - Unexpected Status %04X\n", status);
		count = -EIO;
		break;
	}
	return count;
}
static DEVICE_ATTR_RW(raid_state);
1902
1903 static ssize_t raid_level_show(struct device *dev,
1904                 struct device_attribute *attr, char *buf)
1905 {
1906         struct scsi_device *sdev = to_scsi_device(dev);
1907
1908         if (sdev->channel == myrb_logical_channel(sdev->host)) {
1909                 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1910                 const char *name;
1911
1912                 if (!ldev_info)
1913                         return -ENXIO;
1914
1915                 name = myrb_raidlevel_name(ldev_info->raid_level);
1916                 if (!name)
1917                         return snprintf(buf, 32, "Invalid (%02X)\n",
1918                                         ldev_info->state);
1919                 return snprintf(buf, 32, "%s\n", name);
1920         }
1921         return snprintf(buf, 32, "Physical Drive\n");
1922 }
1923 static DEVICE_ATTR_RO(raid_level);
1924
1925 static ssize_t rebuild_show(struct device *dev,
1926                 struct device_attribute *attr, char *buf)
1927 {
1928         struct scsi_device *sdev = to_scsi_device(dev);
1929         struct myrb_hba *cb = shost_priv(sdev->host);
1930         struct myrb_rbld_progress rbld_buf;
1931         unsigned char status;
1932
1933         if (sdev->channel < myrb_logical_channel(sdev->host))
1934                 return snprintf(buf, 32, "physical device - not rebuilding\n");
1935
1936         status = myrb_get_rbld_progress(cb, &rbld_buf);
1937
1938         if (rbld_buf.ldev_num != sdev->id ||
1939             status != MYRB_STATUS_SUCCESS)
1940                 return snprintf(buf, 32, "not rebuilding\n");
1941
1942         return snprintf(buf, 32, "rebuilding block %u of %u\n",
1943                         rbld_buf.ldev_size - rbld_buf.blocks_left,
1944                         rbld_buf.ldev_size);
1945 }
1946
/*
 * rebuild_store - sysfs 'rebuild' attribute (write)
 *
 * A non-zero value starts an asynchronous rebuild of the physical
 * device; zero cancels a rebuild in progress. Returns @count on
 * success or a negative errno; controller status codes are translated
 * into log messages.
 */
static ssize_t rebuild_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_cmdblk *cmd_blk;
	union myrb_cmd_mbox *mbox;
	unsigned short status;
	int rc, start;
	const char *msg;

	rc = kstrtoint(buf, 0, &start);
	if (rc)
		return rc;

	/* Rebuild applies to physical devices only. */
	if (sdev->channel >= myrb_logical_channel(sdev->host))
		return -ENXIO;

	/* SUCCESS here means a rebuild is already running somewhere. */
	status = myrb_get_rbld_progress(cb, NULL);
	if (start) {
		if (status == MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Initiated; already in progress\n");
			return -EALREADY;
		}
		/* dcmd_blk is a shared direct-command slot; serialize access. */
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
		mbox->type3D.id = MYRB_DCMD_TAG;
		mbox->type3D.channel = sdev->channel;
		mbox->type3D.target = sdev->id;
		status = myrb_exec_cmd(cb, cmd_blk);
		mutex_unlock(&cb->dcmd_mutex);
	} else {
		struct pci_dev *pdev = cb->pdev;
		unsigned char *rate;
		dma_addr_t rate_addr;

		if (status != MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Cancelled; not in progress\n");
			return 0;
		}

		/* REBUILD_CONTROL requires a DMA-able rate byte. */
		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
					  &rate_addr, GFP_KERNEL);
		if (rate == NULL) {
			sdev_printk(KERN_INFO, sdev,
				    "Cancellation of Rebuild Failed - Out of Memory\n");
			return -ENOMEM;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
		mbox->type3R.id = MYRB_DCMD_TAG;
		/* Rate 0xFF cancels the running rebuild. */
		mbox->type3R.rbld_rate = 0xFF;
		mbox->type3R.addr = rate_addr;
		status = myrb_exec_cmd(cb, cmd_blk);
		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
		mutex_unlock(&cb->dcmd_mutex);
	}
	if (status == MYRB_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
			    start ? "Initiated" : "Cancelled");
		return count;
	}
	if (!start) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Cancelled, status 0x%x\n",
			    status);
		return -EIO;
	}

	/* Map start-failure status codes to human-readable reasons. */
	switch (status) {
	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
		msg = "Attempt to Rebuild Online or Unresponsive Drive";
		break;
	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
		msg = "New Disk Failed During Rebuild";
		break;
	case MYRB_STATUS_INVALID_ADDRESS:
		msg = "Invalid Device Address";
		break;
	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
		msg = "Already in Progress";
		break;
	default:
		msg = NULL;
		break;
	}
	if (msg)
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed - %s\n", msg);
	else
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed, status 0x%x\n", status);

	return -EIO;
}
static DEVICE_ATTR_RW(rebuild);
2051
2052 static ssize_t consistency_check_store(struct device *dev,
2053                 struct device_attribute *attr, const char *buf, size_t count)
2054 {
2055         struct scsi_device *sdev = to_scsi_device(dev);
2056         struct myrb_hba *cb = shost_priv(sdev->host);
2057         struct myrb_rbld_progress rbld_buf;
2058         struct myrb_cmdblk *cmd_blk;
2059         union myrb_cmd_mbox *mbox;
2060         unsigned short ldev_num = 0xFFFF;
2061         unsigned short status;
2062         int rc, start;
2063         const char *msg;
2064
2065         rc = kstrtoint(buf, 0, &start);
2066         if (rc)
2067                 return rc;
2068
2069         if (sdev->channel < myrb_logical_channel(sdev->host))
2070                 return -ENXIO;
2071
2072         status = myrb_get_rbld_progress(cb, &rbld_buf);
2073         if (start) {
2074                 if (status == MYRB_STATUS_SUCCESS) {
2075                         sdev_printk(KERN_INFO, sdev,
2076                                     "Check Consistency Not Initiated; already in progress\n");
2077                         return -EALREADY;
2078                 }
2079                 mutex_lock(&cb->dcmd_mutex);
2080                 cmd_blk = &cb->dcmd_blk;
2081                 myrb_reset_cmd(cmd_blk);
2082                 mbox = &cmd_blk->mbox;
2083                 mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2084                 mbox->type3C.id = MYRB_DCMD_TAG;
2085                 mbox->type3C.ldev_num = sdev->id;
2086                 mbox->type3C.auto_restore = true;
2087
2088                 status = myrb_exec_cmd(cb, cmd_blk);
2089                 mutex_unlock(&cb->dcmd_mutex);
2090         } else {
2091                 struct pci_dev *pdev = cb->pdev;
2092                 unsigned char *rate;
2093                 dma_addr_t rate_addr;
2094
2095                 if (ldev_num != sdev->id) {
2096                         sdev_printk(KERN_INFO, sdev,
2097                                     "Check Consistency Not Cancelled; not in progress\n");
2098                         return 0;
2099                 }
2100                 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2101                                           &rate_addr, GFP_KERNEL);
2102                 if (rate == NULL) {
2103                         sdev_printk(KERN_INFO, sdev,
2104                                     "Cancellation of Check Consistency Failed - Out of Memory\n");
2105                         return -ENOMEM;
2106                 }
2107                 mutex_lock(&cb->dcmd_mutex);
2108                 cmd_blk = &cb->dcmd_blk;
2109                 myrb_reset_cmd(cmd_blk);
2110                 mbox = &cmd_blk->mbox;
2111                 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2112                 mbox->type3R.id = MYRB_DCMD_TAG;
2113                 mbox->type3R.rbld_rate = 0xFF;
2114                 mbox->type3R.addr = rate_addr;
2115                 status = myrb_exec_cmd(cb, cmd_blk);
2116                 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2117                 mutex_unlock(&cb->dcmd_mutex);
2118         }
2119         if (status == MYRB_STATUS_SUCCESS) {
2120                 sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2121                             start ? "Initiated" : "Cancelled");
2122                 return count;
2123         }
2124         if (!start) {
2125                 sdev_printk(KERN_INFO, sdev,
2126                             "Check Consistency Not Cancelled, status 0x%x\n",
2127                             status);
2128                 return -EIO;
2129         }
2130
2131         switch (status) {
2132         case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2133                 msg = "Dependent Physical Device is DEAD";
2134                 break;
2135         case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2136                 msg = "New Disk Failed During Rebuild";
2137                 break;
2138         case MYRB_STATUS_INVALID_ADDRESS:
2139                 msg = "Invalid or Nonredundant Logical Drive";
2140                 break;
2141         case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2142                 msg = "Already in Progress";
2143                 break;
2144         default:
2145                 msg = NULL;
2146                 break;
2147         }
2148         if (msg)
2149                 sdev_printk(KERN_INFO, sdev,
2150                             "Check Consistency Failed - %s\n", msg);
2151         else
2152                 sdev_printk(KERN_INFO, sdev,
2153                             "Check Consistency Failed, status 0x%x\n", status);
2154
2155         return -EIO;
2156 }
2157
/*
 * consistency_check_show - sysfs 'consistency_check' attribute
 *
 * Progress reporting is identical to rebuild progress on this
 * controller, so simply reuse rebuild_show().
 */
static ssize_t consistency_check_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return rebuild_show(dev, attr, buf);
}
static DEVICE_ATTR_RW(consistency_check);
2164
2165 static ssize_t ctlr_num_show(struct device *dev,
2166                 struct device_attribute *attr, char *buf)
2167 {
2168         struct Scsi_Host *shost = class_to_shost(dev);
2169         struct myrb_hba *cb = shost_priv(shost);
2170
2171         return snprintf(buf, 20, "%d\n", cb->ctlr_num);
2172 }
2173 static DEVICE_ATTR_RO(ctlr_num);
2174
2175 static ssize_t firmware_show(struct device *dev,
2176                 struct device_attribute *attr, char *buf)
2177 {
2178         struct Scsi_Host *shost = class_to_shost(dev);
2179         struct myrb_hba *cb = shost_priv(shost);
2180
2181         return snprintf(buf, 16, "%s\n", cb->fw_version);
2182 }
2183 static DEVICE_ATTR_RO(firmware);
2184
2185 static ssize_t model_show(struct device *dev,
2186                 struct device_attribute *attr, char *buf)
2187 {
2188         struct Scsi_Host *shost = class_to_shost(dev);
2189         struct myrb_hba *cb = shost_priv(shost);
2190
2191         return snprintf(buf, 16, "%s\n", cb->model_name);
2192 }
2193 static DEVICE_ATTR_RO(model);
2194
2195 static ssize_t flush_cache_store(struct device *dev,
2196                 struct device_attribute *attr, const char *buf, size_t count)
2197 {
2198         struct Scsi_Host *shost = class_to_shost(dev);
2199         struct myrb_hba *cb = shost_priv(shost);
2200         unsigned short status;
2201
2202         status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2203         if (status == MYRB_STATUS_SUCCESS) {
2204                 shost_printk(KERN_INFO, shost,
2205                              "Cache Flush Completed\n");
2206                 return count;
2207         }
2208         shost_printk(KERN_INFO, shost,
2209                      "Cache Flush Failed, status %x\n", status);
2210         return -EIO;
2211 }
2212 static DEVICE_ATTR_WO(flush_cache);
2213
/* Per-device sysfs attributes, hooked up via myrb_template.sdev_attrs. */
static struct device_attribute *myrb_sdev_attrs[] = {
	&dev_attr_rebuild,
	&dev_attr_consistency_check,
	&dev_attr_raid_state,
	&dev_attr_raid_level,
	NULL,
};
2221
/* Host-wide sysfs attributes, hooked up via myrb_template.shost_attrs. */
static struct device_attribute *myrb_shost_attrs[] = {
	&dev_attr_ctlr_num,
	&dev_attr_model,
	&dev_attr_firmware,
	&dev_attr_flush_cache,
	NULL,
};
2229
/*
 * SCSI host template for DAC960 V1-firmware controllers; cmd_size
 * reserves a myrb_cmdblk per command in the midlayer's allocation.
 */
struct scsi_host_template myrb_template = {
	.module                 = THIS_MODULE,
	.name                   = "DAC960",
	.proc_name              = "myrb",
	.queuecommand           = myrb_queuecommand,
	.eh_host_reset_handler  = myrb_host_reset,
	.slave_alloc            = myrb_slave_alloc,
	.slave_configure        = myrb_slave_configure,
	.slave_destroy          = myrb_slave_destroy,
	.bios_param             = myrb_biosparam,
	.cmd_size               = sizeof(struct myrb_cmdblk),
	.shost_attrs            = myrb_shost_attrs,
	.sdev_attrs             = myrb_sdev_attrs,
	.this_id                = -1,
};
2245
2246 /**
2247  * myrb_is_raid - return boolean indicating device is raid volume
2248  * @dev the device struct object
2249  */
2250 static int myrb_is_raid(struct device *dev)
2251 {
2252         struct scsi_device *sdev = to_scsi_device(dev);
2253
2254         return sdev->channel == myrb_logical_channel(sdev->host);
2255 }
2256
2257 /**
2258  * myrb_get_resync - get raid volume resync percent complete
2259  * @dev the device struct object
2260  */
2261 static void myrb_get_resync(struct device *dev)
2262 {
2263         struct scsi_device *sdev = to_scsi_device(dev);
2264         struct myrb_hba *cb = shost_priv(sdev->host);
2265         struct myrb_rbld_progress rbld_buf;
2266         unsigned int percent_complete = 0;
2267         unsigned short status;
2268         unsigned int ldev_size = 0, remaining = 0;
2269
2270         if (sdev->channel < myrb_logical_channel(sdev->host))
2271                 return;
2272         status = myrb_get_rbld_progress(cb, &rbld_buf);
2273         if (status == MYRB_STATUS_SUCCESS) {
2274                 if (rbld_buf.ldev_num == sdev->id) {
2275                         ldev_size = rbld_buf.ldev_size;
2276                         remaining = rbld_buf.blocks_left;
2277                 }
2278         }
2279         if (remaining && ldev_size)
2280                 percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2281         raid_set_resync(myrb_raid_template, dev, percent_complete);
2282 }
2283
2284 /**
2285  * myrb_get_state - get raid volume status
2286  * @dev the device struct object
2287  */
2288 static void myrb_get_state(struct device *dev)
2289 {
2290         struct scsi_device *sdev = to_scsi_device(dev);
2291         struct myrb_hba *cb = shost_priv(sdev->host);
2292         struct myrb_ldev_info *ldev_info = sdev->hostdata;
2293         enum raid_state state = RAID_STATE_UNKNOWN;
2294         unsigned short status;
2295
2296         if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2297                 state = RAID_STATE_UNKNOWN;
2298         else {
2299                 status = myrb_get_rbld_progress(cb, NULL);
2300                 if (status == MYRB_STATUS_SUCCESS)
2301                         state = RAID_STATE_RESYNCING;
2302                 else {
2303                         switch (ldev_info->state) {
2304                         case MYRB_DEVICE_ONLINE:
2305                                 state = RAID_STATE_ACTIVE;
2306                                 break;
2307                         case MYRB_DEVICE_WO:
2308                         case MYRB_DEVICE_CRITICAL:
2309                                 state = RAID_STATE_DEGRADED;
2310                                 break;
2311                         default:
2312                                 state = RAID_STATE_OFFLINE;
2313                         }
2314                 }
2315         }
2316         raid_set_state(myrb_raid_template, dev, state);
2317 }
2318
/* raid_class transport callbacks for the myrb host template */
struct raid_function_template myrb_raid_functions = {
        .cookie         = &myrb_template,
        .is_raid        = myrb_is_raid,
        .get_resync     = myrb_get_resync,
        .get_state      = myrb_get_state,
};
2325
2326 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2327                 struct scsi_cmnd *scmd)
2328 {
2329         unsigned short status;
2330
2331         if (!cmd_blk)
2332                 return;
2333
2334         scsi_dma_unmap(scmd);
2335
2336         if (cmd_blk->dcdb) {
2337                 memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2338                 dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2339                               cmd_blk->dcdb_addr);
2340                 cmd_blk->dcdb = NULL;
2341         }
2342         if (cmd_blk->sgl) {
2343                 dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2344                 cmd_blk->sgl = NULL;
2345                 cmd_blk->sgl_addr = 0;
2346         }
2347         status = cmd_blk->status;
2348         switch (status) {
2349         case MYRB_STATUS_SUCCESS:
2350         case MYRB_STATUS_DEVICE_BUSY:
2351                 scmd->result = (DID_OK << 16) | status;
2352                 break;
2353         case MYRB_STATUS_BAD_DATA:
2354                 dev_dbg(&scmd->device->sdev_gendev,
2355                         "Bad Data Encountered\n");
2356                 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2357                         /* Unrecovered read error */
2358                         scsi_build_sense_buffer(0, scmd->sense_buffer,
2359                                                 MEDIUM_ERROR, 0x11, 0);
2360                 else
2361                         /* Write error */
2362                         scsi_build_sense_buffer(0, scmd->sense_buffer,
2363                                                 MEDIUM_ERROR, 0x0C, 0);
2364                 scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2365                 break;
2366         case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2367                 scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2368                 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2369                         /* Unrecovered read error, auto-reallocation failed */
2370                         scsi_build_sense_buffer(0, scmd->sense_buffer,
2371                                                 MEDIUM_ERROR, 0x11, 0x04);
2372                 else
2373                         /* Write error, auto-reallocation failed */
2374                         scsi_build_sense_buffer(0, scmd->sense_buffer,
2375                                                 MEDIUM_ERROR, 0x0C, 0x02);
2376                 scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2377                 break;
2378         case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2379                 dev_dbg(&scmd->device->sdev_gendev,
2380                             "Logical Drive Nonexistent or Offline");
2381                 scmd->result = (DID_BAD_TARGET << 16);
2382                 break;
2383         case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2384                 dev_dbg(&scmd->device->sdev_gendev,
2385                             "Attempt to Access Beyond End of Logical Drive");
2386                 /* Logical block address out of range */
2387                 scsi_build_sense_buffer(0, scmd->sense_buffer,
2388                                         NOT_READY, 0x21, 0);
2389                 break;
2390         case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2391                 dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2392                 scmd->result = (DID_BAD_TARGET << 16);
2393                 break;
2394         default:
2395                 scmd_printk(KERN_ERR, scmd,
2396                             "Unexpected Error Status %04X", status);
2397                 scmd->result = (DID_ERROR << 16);
2398                 break;
2399         }
2400         scmd->scsi_done(scmd);
2401 }
2402
2403 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2404 {
2405         if (!cmd_blk)
2406                 return;
2407
2408         if (cmd_blk->completion) {
2409                 complete(cmd_blk->completion);
2410                 cmd_blk->completion = NULL;
2411         }
2412 }
2413
/*
 * myrb_monitor - periodic controller housekeeping work
 *
 * Each pass performs at most one pending action, in strict priority
 * order: drain the event log, fetch the error table, fetch rebuild
 * progress (first pass), refresh logical device info, fetch rebuild
 * progress again, consistency-check progress, background-init status.
 * After any such action the work is rescheduled quickly (10 jiffies);
 * otherwise a new enquiry is issued and the work re-runs at the normal
 * MYRB_PRIMARY_MONITOR_INTERVAL (or immediately if the enquiry raised
 * new flags).
 */
static void myrb_monitor(struct work_struct *work)
{
        struct myrb_hba *cb = container_of(work,
                        struct myrb_hba, monitor_work.work);
        struct Scsi_Host *shost = cb->host;
        unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;

        dev_dbg(&shost->shost_gendev, "monitor tick\n");

        if (cb->new_ev_seq > cb->old_ev_seq) {
                /* Fetch one outstanding event log entry per pass */
                int event = cb->old_ev_seq;

                dev_dbg(&shost->shost_gendev,
                        "get event log no %d/%d\n",
                        cb->new_ev_seq, event);
                myrb_get_event(cb, event);
                cb->old_ev_seq = event + 1;
                interval = 10;
        } else if (cb->need_err_info) {
                cb->need_err_info = false;
                dev_dbg(&shost->shost_gendev, "get error table\n");
                myrb_get_errtable(cb);
                interval = 10;
        } else if (cb->need_rbld && cb->rbld_first) {
                cb->need_rbld = false;
                dev_dbg(&shost->shost_gendev,
                        "get rebuild progress\n");
                myrb_update_rbld_progress(cb);
                interval = 10;
        } else if (cb->need_ldev_info) {
                cb->need_ldev_info = false;
                dev_dbg(&shost->shost_gendev,
                        "get logical drive info\n");
                myrb_get_ldev_info(cb);
                interval = 10;
        } else if (cb->need_rbld) {
                cb->need_rbld = false;
                dev_dbg(&shost->shost_gendev,
                        "get rebuild progress\n");
                myrb_update_rbld_progress(cb);
                interval = 10;
        } else if (cb->need_cc_status) {
                cb->need_cc_status = false;
                dev_dbg(&shost->shost_gendev,
                        "get consistency check progress\n");
                myrb_get_cc_progress(cb);
                interval = 10;
        } else if (cb->need_bgi_status) {
                cb->need_bgi_status = false;
                dev_dbg(&shost->shost_gendev, "get background init status\n");
                myrb_bgi_control(cb);
                interval = 10;
        } else {
                /* Nothing pending: issue a fresh enquiry */
                dev_dbg(&shost->shost_gendev, "new enquiry\n");
                mutex_lock(&cb->dma_mutex);
                myrb_hba_enquiry(cb);
                mutex_unlock(&cb->dma_mutex);
                if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
                    cb->need_err_info || cb->need_rbld ||
                    cb->need_ldev_info || cb->need_cc_status ||
                    cb->need_bgi_status) {
                        dev_dbg(&shost->shost_gendev,
                                "reschedule monitor\n");
                        interval = 0;
                }
        }
        /* Remember when the last full-interval pass happened */
        if (interval > 1)
                cb->primary_monitor_time = jiffies;
        queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}
2484
2485 /**
2486  * myrb_err_status - reports controller BIOS messages
2487  *
2488  * Controller BIOS messages are passed through the Error Status Register
2489  * when the driver performs the BIOS handshaking.
2490  *
2491  * Return: true for fatal errors and false otherwise.
2492  */
2493 bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2494                 unsigned char parm0, unsigned char parm1)
2495 {
2496         struct pci_dev *pdev = cb->pdev;
2497
2498         switch (error) {
2499         case 0x00:
2500                 dev_info(&pdev->dev,
2501                          "Physical Device %d:%d Not Responding\n",
2502                          parm1, parm0);
2503                 break;
2504         case 0x08:
2505                 dev_notice(&pdev->dev, "Spinning Up Drives\n");
2506                 break;
2507         case 0x30:
2508                 dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2509                 break;
2510         case 0x60:
2511                 dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2512                 break;
2513         case 0x70:
2514                 dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2515                 break;
2516         case 0x90:
2517                 dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2518                            parm1, parm0);
2519                 break;
2520         case 0xA0:
2521                 dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2522                 break;
2523         case 0xB0:
2524                 dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2525                 break;
2526         case 0xD0:
2527                 dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2528                 break;
2529         case 0xF0:
2530                 dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2531                 return true;
2532         default:
2533                 dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2534                         error);
2535                 return true;
2536         }
2537         return false;
2538 }
2539
2540 /*
2541  * Hardware-specific functions
2542  */
2543
2544 /*
2545  * DAC960 LA Series Controllers
2546  */
2547
2548 static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
2549 {
2550         writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2551 }
2552
2553 static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
2554 {
2555         writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
2556 }
2557
2558 static inline void DAC960_LA_gen_intr(void __iomem *base)
2559 {
2560         writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
2561 }
2562
2563 static inline void DAC960_LA_reset_ctrl(void __iomem *base)
2564 {
2565         writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
2566 }
2567
2568 static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
2569 {
2570         writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2571 }
2572
/* True while the controller still owns the hardware mailbox */
static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
{
        unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

        return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
}

/* True while controller initialization has not completed */
static inline bool DAC960_LA_init_in_progress(void __iomem *base)
{
        unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

        return !(idb & DAC960_LA_IDB_INIT_DONE);
}
2586
/* Acknowledge a hardware mailbox interrupt */
static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
{
        writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}

/* Acknowledge a memory mailbox interrupt */
static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
{
        writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}

/* Acknowledge both hardware and memory mailbox interrupts at once */
static inline void DAC960_LA_ack_intr(void __iomem *base)
{
        writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
               base + DAC960_LA_ODB_OFFSET);
}
2602
/* True when a hardware mailbox completion status is waiting */
static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
{
        unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

        return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
}

/* True when a memory mailbox completion status is waiting */
static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
{
        unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

        return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
}
2616
/* Enable interrupts by clearing the disable bit in the IRQ mask */
static inline void DAC960_LA_enable_intr(void __iomem *base)
{
        unsigned char odb = 0xFF;

        odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
        writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}

/* Disable interrupts by setting the disable bit (0xFF masks all) */
static inline void DAC960_LA_disable_intr(void __iomem *base)
{
        unsigned char odb = 0xFF;

        odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
        writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}

/* Check whether interrupts are currently enabled */
static inline bool DAC960_LA_intr_enabled(void __iomem *base)
{
        unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);

        return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
}
2639
/*
 * Copy a command into the memory mailbox.  Word 0 (containing the
 * opcode) is written last so the controller never sees a partially
 * written command.
 */
static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
                union myrb_cmd_mbox *mbox)
{
        mem_mbox->words[1] = mbox->words[1];
        mem_mbox->words[2] = mbox->words[2];
        mem_mbox->words[3] = mbox->words[3];
        /* Memory barrier to prevent reordering */
        wmb();
        mem_mbox->words[0] = mbox->words[0];
        /* Memory barrier to force PCI access */
        mb();
}
2652
/* Write a 13-byte command image into the hardware mailbox registers */
static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
                union myrb_cmd_mbox *mbox)
{
        writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
        writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
        writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
        writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
}

/* Read the command identifier of the completed command */
static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
{
        return readb(base + DAC960_LA_STSID_OFFSET);
}

/* Read the 16-bit completion status of the last command */
static inline unsigned short DAC960_LA_read_status(void __iomem *base)
{
        return readw(base + DAC960_LA_STS_OFFSET);
}
2671
/*
 * Read and clear a pending BIOS error report.  Returns true and fills
 * *error / *param0 / *param1 when a report was pending, false otherwise.
 */
static inline bool
DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
                unsigned char *param0, unsigned char *param1)
{
        unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);

        if (!(errsts & DAC960_LA_ERRSTS_PENDING))
                return false;
        errsts &= ~DAC960_LA_ERRSTS_PENDING;

        *error = errsts;
        *param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
        *param1 = readb(base + DAC960_LA_CMDID_OFFSET);
        /* Writing back clears the pending error report */
        writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
        return true;
}
2688
2689 static inline unsigned short
2690 DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2691                 union myrb_cmd_mbox *mbox)
2692 {
2693         unsigned short status;
2694         int timeout = 0;
2695
2696         while (timeout < MYRB_MAILBOX_TIMEOUT) {
2697                 if (!DAC960_LA_hw_mbox_is_full(base))
2698                         break;
2699                 udelay(10);
2700                 timeout++;
2701         }
2702         if (DAC960_LA_hw_mbox_is_full(base)) {
2703                 dev_err(&pdev->dev,
2704                         "Timeout waiting for empty mailbox\n");
2705                 return MYRB_STATUS_SUBSYS_TIMEOUT;
2706         }
2707         DAC960_LA_write_hw_mbox(base, mbox);
2708         DAC960_LA_hw_mbox_new_cmd(base);
2709         timeout = 0;
2710         while (timeout < MYRB_MAILBOX_TIMEOUT) {
2711                 if (DAC960_LA_hw_mbox_status_available(base))
2712                         break;
2713                 udelay(10);
2714                 timeout++;
2715         }
2716         if (!DAC960_LA_hw_mbox_status_available(base)) {
2717                 dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2718                 return MYRB_STATUS_SUBSYS_TIMEOUT;
2719         }
2720         status = DAC960_LA_read_status(base);
2721         DAC960_LA_ack_hw_mbox_intr(base);
2722         DAC960_LA_ack_hw_mbox_status(base);
2723
2724         return status;
2725 }
2726
/*
 * Bring up an LA-series controller: wait for BIOS initialisation to
 * finish (logging any BIOS messages seen along the way), enable the
 * memory mailbox interface and install the LA method pointers.
 */
static int DAC960_LA_hw_init(struct pci_dev *pdev,
                struct myrb_hba *cb, void __iomem *base)
{
        int timeout = 0;
        unsigned char error, parm0, parm1;

        DAC960_LA_disable_intr(base);
        DAC960_LA_ack_hw_mbox_status(base);
        udelay(1000);
        timeout = 0;
        /* Poll until init completes, surfacing BIOS error reports */
        while (DAC960_LA_init_in_progress(base) &&
               timeout < MYRB_MAILBOX_TIMEOUT) {
                if (DAC960_LA_read_error_status(base, &error,
                                              &parm0, &parm1) &&
                    myrb_err_status(cb, error, parm0, parm1))
                        return -ENODEV;
                udelay(10);
                timeout++;
        }
        if (timeout == MYRB_MAILBOX_TIMEOUT) {
                dev_err(&pdev->dev,
                        "Timeout waiting for Controller Initialisation\n");
                return -ETIMEDOUT;
        }
        if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
                dev_err(&pdev->dev,
                        "Unable to Enable Memory Mailbox Interface\n");
                DAC960_LA_reset_ctrl(base);
                return -ENODEV;
        }
        DAC960_LA_enable_intr(base);
        /* Install LA-specific method pointers */
        cb->qcmd = myrb_qcmd;
        cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
        if (cb->dual_mode_interface)
                cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
        else
                cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
        cb->disable_intr = DAC960_LA_disable_intr;
        cb->reset = DAC960_LA_reset_ctrl;

        return 0;
}
2769
/*
 * LA-series interrupt handler: acknowledge the interrupt and walk the
 * status mailbox ring, completing every valid entry.
 */
static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
{
        struct myrb_hba *cb = arg;
        void __iomem *base = cb->io_base;
        struct myrb_stat_mbox *next_stat_mbox;
        unsigned long flags;

        spin_lock_irqsave(&cb->queue_lock, flags);
        DAC960_LA_ack_intr(base);
        next_stat_mbox = cb->next_stat_mbox;
        while (next_stat_mbox->valid) {
                unsigned char id = next_stat_mbox->id;
                struct scsi_cmnd *scmd = NULL;
                struct myrb_cmdblk *cmd_blk = NULL;

                /*
                 * DCMD/MCMD tags identify the driver's internal command
                 * blocks; any other id maps to a SCSI host tag (id - 3).
                 */
                if (id == MYRB_DCMD_TAG)
                        cmd_blk = &cb->dcmd_blk;
                else if (id == MYRB_MCMD_TAG)
                        cmd_blk = &cb->mcmd_blk;
                else {
                        scmd = scsi_host_find_tag(cb->host, id - 3);
                        if (scmd)
                                cmd_blk = scsi_cmd_priv(scmd);
                }
                if (cmd_blk)
                        cmd_blk->status = next_stat_mbox->status;
                else
                        dev_err(&cb->pdev->dev,
                                "Unhandled command completion %d\n", id);

                /* Release the mailbox slot before completing the command */
                memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
                if (++next_stat_mbox > cb->last_stat_mbox)
                        next_stat_mbox = cb->first_stat_mbox;

                if (cmd_blk) {
                        if (id < 3)
                                myrb_handle_cmdblk(cb, cmd_blk);
                        else
                                myrb_handle_scsi(cb, cmd_blk, scmd);
                }
        }
        cb->next_stat_mbox = next_stat_mbox;
        spin_unlock_irqrestore(&cb->queue_lock, flags);
        return IRQ_HANDLED;
}
2815
/* Probe-time hooks and MMIO window size for LA-series boards */
struct myrb_privdata DAC960_LA_privdata = {
        .hw_init =      DAC960_LA_hw_init,
        .irq_handler =  DAC960_LA_intr_handler,
        .mmio_size =    DAC960_LA_mmio_size,
};
2821
2822 /*
2823  * DAC960 PG Series Controllers
2824  */
2825 static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
2826 {
2827         writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2828 }
2829
2830 static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
2831 {
2832         writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
2833 }
2834
2835 static inline void DAC960_PG_gen_intr(void __iomem *base)
2836 {
2837         writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
2838 }
2839
2840 static inline void DAC960_PG_reset_ctrl(void __iomem *base)
2841 {
2842         writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
2843 }
2844
2845 static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
2846 {
2847         writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2848 }
2849
/* True while the controller still owns the hardware mailbox
 * (the 32-bit doorbell read is truncated to its low byte) */
static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
{
        unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

        return idb & DAC960_PG_IDB_HWMBOX_FULL;
}

/* True while controller initialization has not completed */
static inline bool DAC960_PG_init_in_progress(void __iomem *base)
{
        unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

        return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
}
2863
/* Acknowledge a hardware mailbox interrupt */
static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
{
        writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}

/* Acknowledge a memory mailbox interrupt */
static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
{
        writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}

/* Acknowledge both hardware and memory mailbox interrupts at once */
static inline void DAC960_PG_ack_intr(void __iomem *base)
{
        writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
               base + DAC960_PG_ODB_OFFSET);
}
2879
/* True when a hardware mailbox completion status is waiting */
static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
{
        unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

        return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
}

/* True when a memory mailbox completion status is waiting */
static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
{
        unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

        return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
}
2893
/* Enable interrupts by clearing the disable bit in the IRQ mask */
static inline void DAC960_PG_enable_intr(void __iomem *base)
{
        unsigned int imask = (unsigned int)-1;

        imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
        writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}

/* Disable interrupts by writing an all-ones mask */
static inline void DAC960_PG_disable_intr(void __iomem *base)
{
        unsigned int imask = (unsigned int)-1;

        writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}

/* Check whether interrupts are currently enabled */
static inline bool DAC960_PG_intr_enabled(void __iomem *base)
{
        unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);

        return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
}
2915
/*
 * Copy a command into the memory mailbox.  Word 0 (containing the
 * opcode) is written last so the controller never sees a partially
 * written command.
 */
static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
                union myrb_cmd_mbox *mbox)
{
        mem_mbox->words[1] = mbox->words[1];
        mem_mbox->words[2] = mbox->words[2];
        mem_mbox->words[3] = mbox->words[3];
        /* Memory barrier to prevent reordering */
        wmb();
        mem_mbox->words[0] = mbox->words[0];
        /* Memory barrier to force PCI access */
        mb();
}
2928
/* Write a 13-byte command image into the hardware mailbox registers */
static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
                union myrb_cmd_mbox *mbox)
{
        writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
        writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
        writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
        writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
}

/* Read the command identifier of the completed command */
static inline unsigned char
DAC960_PG_read_status_cmd_ident(void __iomem *base)
{
        return readb(base + DAC960_PG_STSID_OFFSET);
}

/* Read the 16-bit completion status of the last command */
static inline unsigned short
DAC960_PG_read_status(void __iomem *base)
{
        return readw(base + DAC960_PG_STS_OFFSET);
}
2949
/*
 * Read and clear a pending BIOS error report.  Returns true and fills
 * *error / *param0 / *param1 when a report was pending, false otherwise.
 */
static inline bool
DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
                unsigned char *param0, unsigned char *param1)
{
        unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);

        if (!(errsts & DAC960_PG_ERRSTS_PENDING))
                return false;
        errsts &= ~DAC960_PG_ERRSTS_PENDING;
        *error = errsts;
        *param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
        *param1 = readb(base + DAC960_PG_CMDID_OFFSET);
        /* Writing zero clears the pending error report */
        writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
        return true;
}
2965
2966 static inline unsigned short
2967 DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2968                 union myrb_cmd_mbox *mbox)
2969 {
2970         unsigned short status;
2971         int timeout = 0;
2972
2973         while (timeout < MYRB_MAILBOX_TIMEOUT) {
2974                 if (!DAC960_PG_hw_mbox_is_full(base))
2975                         break;
2976                 udelay(10);
2977                 timeout++;
2978         }
2979         if (DAC960_PG_hw_mbox_is_full(base)) {
2980                 dev_err(&pdev->dev,
2981                         "Timeout waiting for empty mailbox\n");
2982                 return MYRB_STATUS_SUBSYS_TIMEOUT;
2983         }
2984         DAC960_PG_write_hw_mbox(base, mbox);
2985         DAC960_PG_hw_mbox_new_cmd(base);
2986
2987         timeout = 0;
2988         while (timeout < MYRB_MAILBOX_TIMEOUT) {
2989                 if (DAC960_PG_hw_mbox_status_available(base))
2990                         break;
2991                 udelay(10);
2992                 timeout++;
2993         }
2994         if (!DAC960_PG_hw_mbox_status_available(base)) {
2995                 dev_err(&pdev->dev,
2996                         "Timeout waiting for mailbox status\n");
2997                 return MYRB_STATUS_SUBSYS_TIMEOUT;
2998         }
2999         status = DAC960_PG_read_status(base);
3000         DAC960_PG_ack_hw_mbox_intr(base);
3001         DAC960_PG_ack_hw_mbox_status(base);
3002
3003         return status;
3004 }
3005
/*
 * Bring up a PG-series controller: wait for BIOS initialisation to
 * finish (logging any BIOS messages seen along the way), enable the
 * memory mailbox interface and install the PG method pointers.
 */
static int DAC960_PG_hw_init(struct pci_dev *pdev,
                struct myrb_hba *cb, void __iomem *base)
{
        int timeout = 0;
        unsigned char error, parm0, parm1;

        DAC960_PG_disable_intr(base);
        DAC960_PG_ack_hw_mbox_status(base);
        udelay(1000);
        /* Poll until init completes, surfacing BIOS error reports */
        while (DAC960_PG_init_in_progress(base) &&
               timeout < MYRB_MAILBOX_TIMEOUT) {
                if (DAC960_PG_read_error_status(base, &error,
                                                &parm0, &parm1) &&
                    myrb_err_status(cb, error, parm0, parm1))
                        return -EIO;
                udelay(10);
                timeout++;
        }
        if (timeout == MYRB_MAILBOX_TIMEOUT) {
                dev_err(&pdev->dev,
                        "Timeout waiting for Controller Initialisation\n");
                return -ETIMEDOUT;
        }
        if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
                dev_err(&pdev->dev,
                        "Unable to Enable Memory Mailbox Interface\n");
                DAC960_PG_reset_ctrl(base);
                return -ENODEV;
        }
        DAC960_PG_enable_intr(base);
        /* Install PG-specific method pointers */
        cb->qcmd = myrb_qcmd;
        cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
        if (cb->dual_mode_interface)
                cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
        else
                cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
        cb->disable_intr = DAC960_PG_disable_intr;
        cb->reset = DAC960_PG_reset_ctrl;

        return 0;
}
3047
/*
 * DAC960_PG_intr_handler - interrupt handler for DAC960 PG controllers
 *
 * Walks the ring of status mailboxes, completing every command whose
 * status has been posted by the controller, then remembers where the
 * next scan should resume.  Runs under cb->queue_lock with local
 * interrupts disabled.
 */
static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_PG_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		/*
		 * Tags 1 and 2 are the driver-internal direct/monitor
		 * commands; anything else maps back to a SCSI command
		 * (hardware tags are offset by 3 from the host tags).
		 */
		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Clear the slot (drops ->valid) and advance, wrapping
		 * at the end of the ring. */
		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
3091
/* Per-model hooks for DAC960 PG series controllers. */
struct myrb_privdata DAC960_PG_privdata = {
	.hw_init =	DAC960_PG_hw_init,
	.irq_handler =	DAC960_PG_intr_handler,
	.mmio_size =	DAC960_PG_mmio_size,
};
3097
3098
3099 /*
3100  * DAC960 PD Series Controllers
3101  */
3102
/* Ring the inbound doorbell: a new command is in the hardware mailbox. */
static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
}
3107
/* Acknowledge that the hardware mailbox status has been consumed. */
static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
}
3112
/* Ask the controller to generate an interrupt (currently unused here). */
static inline void DAC960_PD_gen_intr(void __iomem *base)
{
	writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
}
3117
/* Trigger a controller soft reset via the inbound doorbell. */
static inline void DAC960_PD_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
}
3122
3123 static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3124 {
3125         unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3126
3127         return idb & DAC960_PD_IDB_HWMBOX_FULL;
3128 }
3129
3130 static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3131 {
3132         unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3133
3134         return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3135 }
3136
/* Acknowledge the outbound-doorbell interrupt from the controller. */
static inline void DAC960_PD_ack_intr(void __iomem *base)
{
	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
}
3141
3142 static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3143 {
3144         unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3145
3146         return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3147 }
3148
/* Unmask controller interrupts. */
static inline void DAC960_PD_enable_intr(void __iomem *base)
{
	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
}
3153
/* Mask all controller interrupts. */
static inline void DAC960_PD_disable_intr(void __iomem *base)
{
	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
}
3158
3159 static inline bool DAC960_PD_intr_enabled(void __iomem *base)
3160 {
3161         unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
3162
3163         return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
3164 }
3165
/*
 * Copy a 13-byte command mailbox into the controller's I/O-port mailbox
 * registers.  Note the write order: three 32-bit words first, then the
 * final byte at offset 12 — presumably the register layout requires the
 * trailing byte to go last (TODO: confirm against hardware docs).
 */
static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
}
3174
/* Read the command identifier (tag) of the completed command. */
static inline unsigned char
DAC960_PD_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PD_STSID_OFFSET);
}
3180
/* Read the 16-bit completion status of the completed command. */
static inline unsigned short
DAC960_PD_read_status(void __iomem *base)
{
	return readw(base + DAC960_PD_STS_OFFSET);
}
3186
/*
 * DAC960_PD_read_error_status - fetch a pending initialisation error
 * @error:  error code with the PENDING bit stripped
 * @param0: first error parameter (read from the CMDOP register)
 * @param1: second error parameter (read from the CMDID register)
 *
 * Returns false if no error is pending.  Otherwise fills in the three
 * out-parameters, clears the error-status register so the next error can
 * be latched, and returns true.
 */
static inline bool
DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PD_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
	return true;
}
3202
3203 static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3204 {
3205         void __iomem *base = cb->io_base;
3206         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3207
3208         while (DAC960_PD_hw_mbox_is_full(base))
3209                 udelay(1);
3210         DAC960_PD_write_cmd_mbox(base, mbox);
3211         DAC960_PD_hw_mbox_new_cmd(base);
3212 }
3213
/*
 * DAC960_PD_hw_init - bring up a DAC960 PD series controller
 *
 * Claims the controller's I/O port range, waits for firmware
 * initialisation to finish (reporting any latched error), enables the
 * mailbox interface and installs the PD method pointers on @cb.
 *
 * Returns 0 on success or a negative errno.  On the early error returns
 * the claimed I/O region is not released here; presumably the caller's
 * myrb_cleanup() path releases it — TODO confirm.
 */
static int DAC960_PD_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	/* Poll until firmware init completes or the timeout expires. */
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	/* Install the PD-specific controller methods. */
	cb->qcmd = DAC960_PD_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
3255
/*
 * DAC960_PD_intr_handler - interrupt handler for DAC960 PD controllers
 *
 * PD controllers post one completion at a time through I/O-port status
 * registers rather than a memory mailbox ring; drain all available
 * completions under cb->queue_lock.
 */
static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		/* Tags 1/2 are internal commands; others map to SCSI
		 * commands (hardware tag = host tag + 3). */
		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Ack after the status has been read so the controller
		 * may post the next completion. */
		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
3294
/* Per-model hooks for DAC960 PD series controllers. */
struct myrb_privdata DAC960_PD_privdata = {
	.hw_init =	DAC960_PD_hw_init,
	.irq_handler =	DAC960_PD_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
3300
3301
3302 /*
3303  * DAC960 P Series Controllers
3304  *
3305  * Similar to the DAC960 PD Series Controllers, but some commands have
3306  * to be translated.
3307  */
3308
/*
 * Convert an old-style (DAC960 P) enquiry buffer to the current layout:
 * move the 64 bytes at offset 36 up to offset 132, then zero the
 * 96-byte gap they leave behind.
 */
static inline void myrb_translate_enquiry(void *enq)
{
	memcpy(enq + 132, enq + 36, 64);
	memset(enq + 36, 0, 96);
}
3314
/*
 * Convert an old-style (DAC960 P) device-state buffer to the current
 * layout by shifting fields down to their new offsets.  memmove is used
 * for the multi-byte moves because source and destination overlap.
 */
static inline void myrb_translate_devstate(void *state)
{
	memcpy(state + 2, state + 3, 1);
	memmove(state + 4, state + 5, 2);
	memmove(state + 6, state + 8, 4);
}
3321
/*
 * Repack a read/write command mailbox into the old (DAC960 P) layout:
 * the logical device number moves into byte 7 and the two bits formerly
 * in byte 7 move into the top bits of byte 3.  The exact bit layout
 * mirrors myrb_translate_from_rw_command() below.
 */
static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->type5.ld.ldev_num;

	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= mbox->bytes[7] << 6;
	mbox->bytes[7] = ldev_num;
}
3331
/*
 * Inverse of myrb_translate_to_rw_command(): restore a completed old
 * (DAC960 P) read/write mailbox to the current layout so the generic
 * completion code sees the expected field positions.
 */
static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->bytes[7];

	mbox->bytes[7] = mbox->bytes[3] >> 6;
	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= ldev_num << 3;
}
3341
/*
 * DAC960_P_qcmd - submit one command to a DAC960 P controller
 *
 * P series firmware only understands the old command opcodes, so
 * translate the modern opcode (and, for read/write, the mailbox byte
 * layout) before handing the command to the PD-compatible mailbox
 * registers.  The translation is undone in DAC960_P_intr_handler().
 */
static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	switch (mbox->common.opcode) {
	case MYRB_CMD_ENQUIRY:
		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
		break;
	case MYRB_CMD_GET_DEVICE_STATE:
		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
		break;
	case MYRB_CMD_READ:
		mbox->common.opcode = MYRB_CMD_READ_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE:
		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_READ_SG:
		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE_SG:
		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	default:
		/* All other opcodes are accepted unchanged. */
		break;
	}
	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
3378
3379
3380 static int DAC960_P_hw_init(struct pci_dev *pdev,
3381                 struct myrb_hba *cb, void __iomem *base)
3382 {
3383         int timeout = 0;
3384         unsigned char error, parm0, parm1;
3385
3386         if (!request_region(cb->io_addr, 0x80, "myrb")) {
3387                 dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3388                         (unsigned long)cb->io_addr);
3389                 return -EBUSY;
3390         }
3391         DAC960_PD_disable_intr(base);
3392         DAC960_PD_ack_hw_mbox_status(base);
3393         udelay(1000);
3394         while (DAC960_PD_init_in_progress(base) &&
3395                timeout < MYRB_MAILBOX_TIMEOUT) {
3396                 if (DAC960_PD_read_error_status(base, &error,
3397                                                 &parm0, &parm1) &&
3398                     myrb_err_status(cb, error, parm0, parm1))
3399                         return -EAGAIN;
3400                 udelay(10);
3401                 timeout++;
3402         }
3403         if (timeout == MYRB_MAILBOX_TIMEOUT) {
3404                 dev_err(&pdev->dev,
3405                         "Timeout waiting for Controller Initialisation\n");
3406                 return -ETIMEDOUT;
3407         }
3408         if (!myrb_enable_mmio(cb, NULL)) {
3409                 dev_err(&pdev->dev,
3410                         "Unable to allocate DMA mapped memory\n");
3411                 DAC960_PD_reset_ctrl(base);
3412                 return -ETIMEDOUT;
3413         }
3414         DAC960_PD_enable_intr(base);
3415         cb->qcmd = DAC960_P_qcmd;
3416         cb->disable_intr = DAC960_PD_disable_intr;
3417         cb->reset = DAC960_PD_reset_ctrl;
3418
3419         return 0;
3420 }
3421
/*
 * DAC960_P_intr_handler - interrupt handler for DAC960 P controllers
 *
 * Same completion mechanism as the PD series, but commands that were
 * translated to the old opcodes in DAC960_P_qcmd() must be translated
 * back (opcode restored, read/write mailbox layout and enquiry data
 * converted) before being handed to the generic completion handlers.
 */
static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;
		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;


		/* Tags 1/2 are internal commands; others map to SCSI
		 * commands (hardware tag = host tag + 3). */
		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		/* Nothing to complete for an unknown tag; the interrupt
		 * has already been acknowledged above. */
		if (!cmd_blk)
			continue;

		/* Undo the old-opcode translation done at submit time. */
		mbox = &cmd_blk->mbox;
		op = mbox->common.opcode;
		switch (op) {
		case MYRB_CMD_ENQUIRY_OLD:
			mbox->common.opcode = MYRB_CMD_ENQUIRY;
			myrb_translate_enquiry(cb->enquiry);
			break;
		case MYRB_CMD_READ_OLD:
			mbox->common.opcode = MYRB_CMD_READ;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_READ_SG_OLD:
			mbox->common.opcode = MYRB_CMD_READ_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_SG_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		default:
			break;
		}
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
3492
/* Per-model hooks for DAC960 P series controllers (PD-sized MMIO window). */
struct myrb_privdata DAC960_P_privdata = {
	.hw_init =	DAC960_P_hw_init,
	.irq_handler =	DAC960_P_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
3498
/*
 * myrb_detect - allocate and initialise one controller instance
 *
 * Allocates the Scsi_Host with the embedded myrb_hba, enables the PCI
 * device, maps the register window, runs the model-specific hw_init
 * hook from @entry->driver_data and requests the IRQ.
 *
 * Returns the initialised myrb_hba, or NULL on failure (after tearing
 * down via myrb_cleanup()).
 */
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;
	cb->host = shost;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		scsi_host_put(shost);
		return NULL;
	}

	/*
	 * PD and P series expose their registers through an I/O port
	 * range in BAR 0 with the MMIO window in BAR 1; all other
	 * models use BAR 0 for MMIO directly.
	 */
	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else
		cb->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	/* Map at least one page, page-aligned; io_base re-adds the
	 * sub-page offset below. */
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrb_cleanup(cb);
	return NULL;
}
3564
3565 static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3566 {
3567         struct myrb_hba *cb;
3568         int ret;
3569
3570         cb = myrb_detect(dev, entry);
3571         if (!cb)
3572                 return -ENODEV;
3573
3574         ret = myrb_get_hba_config(cb);
3575         if (ret < 0) {
3576                 myrb_cleanup(cb);
3577                 return ret;
3578         }
3579
3580         if (!myrb_create_mempools(dev, cb)) {
3581                 ret = -ENOMEM;
3582                 goto failed;
3583         }
3584
3585         ret = scsi_add_host(cb->host, &dev->dev);
3586         if (ret) {
3587                 dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3588                 myrb_destroy_mempools(cb);
3589                 goto failed;
3590         }
3591         scsi_scan_host(cb->host);
3592         return 0;
3593 failed:
3594         myrb_cleanup(cb);
3595         return ret;
3596 }
3597
3598
/*
 * myrb_remove - PCI remove entry point
 *
 * Flushes the controller cache before tearing down the host and the
 * DMA mempools.
 */
static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}
3608
3609
/*
 * Supported controllers.  The LA series hides behind a DEC 21285
 * bridge, so it is matched by subsystem vendor/device; the rest match
 * on the Mylex IDs directly.  driver_data carries the per-model
 * myrb_privdata.
 */
static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data	= (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrb_id_table);
3631
/* PCI driver glue. */
static struct pci_driver myrb_pci_driver = {
	.name		= "myrb",
	.id_table	= myrb_id_table,
	.probe		= myrb_probe,
	.remove		= myrb_remove,
};
3638
3639 static int __init myrb_init_module(void)
3640 {
3641         int ret;
3642
3643         myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3644         if (!myrb_raid_template)
3645                 return -ENODEV;
3646
3647         ret = pci_register_driver(&myrb_pci_driver);
3648         if (ret)
3649                 raid_class_release(myrb_raid_template);
3650
3651         return ret;
3652 }
3653
/* Module exit: unregister the PCI driver and drop the RAID template. */
static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}
3659
3660 module_init(myrb_init_module);
3661 module_exit(myrb_cleanup_module);
3662
3663 MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
3664 MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3665 MODULE_LICENSE("GPL");