GNU Linux-libre 5.15.54-gnu
[releases.git] / drivers / scsi / myrb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4  *
5  * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
6  *
7  * Based on the original DAC960 driver,
8  * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
9  * Portions Copyright 2002 by Mylex (An IBM Business Unit)
10  *
11  */
12
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/pci.h>
18 #include <linux/raid_class.h>
19 #include <asm/unaligned.h>
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_host.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_tcq.h>
25 #include "myrb.h"
26
27 static struct raid_template *myrb_raid_template;
28
29 static void myrb_monitor(struct work_struct *work);
30 static inline void myrb_translate_devstate(void *DeviceState);
31
32 static inline int myrb_logical_channel(struct Scsi_Host *shost)
33 {
34         return shost->max_channel - 1;
35 }
36
/* Translation table mapping device states to printable names. */
static struct myrb_devstate_name_entry {
	enum myrb_devstate state;
	const char *name;
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};
48
49 static const char *myrb_devstate_name(enum myrb_devstate state)
50 {
51         struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
52         int i;
53
54         for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
55                 if (entry[i].state == state)
56                         return entry[i].name;
57         }
58         return "Unknown";
59 }
60
/* Translation table mapping RAID levels to printable names. */
static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;
	const char *name;
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};
72
73 static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
74 {
75         struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
76         int i;
77
78         for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
79                 if (entry[i].level == level)
80                         return entry[i].name;
81         }
82         return NULL;
83 }
84
85 /*
86  * myrb_create_mempools - allocates auxiliary data structures
87  *
88  * Return: true on success, false otherwise.
89  */
90 static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
91 {
92         size_t elem_size, elem_align;
93
94         elem_align = sizeof(struct myrb_sge);
95         elem_size = cb->host->sg_tablesize * elem_align;
96         cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
97                                       elem_size, elem_align, 0);
98         if (cb->sg_pool == NULL) {
99                 shost_printk(KERN_ERR, cb->host,
100                              "Failed to allocate SG pool\n");
101                 return false;
102         }
103
104         cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
105                                        sizeof(struct myrb_dcdb),
106                                        sizeof(unsigned int), 0);
107         if (!cb->dcdb_pool) {
108                 dma_pool_destroy(cb->sg_pool);
109                 cb->sg_pool = NULL;
110                 shost_printk(KERN_ERR, cb->host,
111                              "Failed to allocate DCDB pool\n");
112                 return false;
113         }
114
115         snprintf(cb->work_q_name, sizeof(cb->work_q_name),
116                  "myrb_wq_%d", cb->host->host_no);
117         cb->work_q = create_singlethread_workqueue(cb->work_q_name);
118         if (!cb->work_q) {
119                 dma_pool_destroy(cb->dcdb_pool);
120                 cb->dcdb_pool = NULL;
121                 dma_pool_destroy(cb->sg_pool);
122                 cb->sg_pool = NULL;
123                 shost_printk(KERN_ERR, cb->host,
124                              "Failed to create workqueue\n");
125                 return false;
126         }
127
128         /*
129          * Initialize the Monitoring Timer.
130          */
131         INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
132         queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
133
134         return true;
135 }
136
/*
 * myrb_destroy_mempools - tears down the memory pools for the controller
 *
 * Counterpart of myrb_create_mempools(): the monitor work is cancelled
 * (and waited for) before its workqueue is destroyed, then the DMA
 * pools are released.
 */
static void myrb_destroy_mempools(struct myrb_hba *cb)
{
	/* Ensure no monitor run is in flight before tearing down. */
	cancel_delayed_work_sync(&cb->monitor_work);
	destroy_workqueue(cb->work_q);

	dma_pool_destroy(cb->sg_pool);
	dma_pool_destroy(cb->dcdb_pool);
}
148
149 /*
150  * myrb_reset_cmd - reset command block
151  */
152 static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
153 {
154         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
155
156         memset(mbox, 0, sizeof(union myrb_cmd_mbox));
157         cmd_blk->status = 0;
158 }
159
/*
 * myrb_qcmd - queues command block for execution
 *
 * Copies the command into the next free slot of the in-memory mailbox
 * ring via the controller-specific write_cmd_mbox hook and, when one of
 * the two previously submitted slots has been consumed by the
 * controller (words[0] back to 0), invokes the get_cmd_mbox hook to
 * prod the hardware.  Callers serialize this via cb->queue_lock (see
 * myrb_exec_cmd()).
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	cb->write_cmd_mbox(next_mbox, mbox);
	/*
	 * Only notify the controller when at least one of the last two
	 * submitted mailboxes has been picked up; otherwise the hardware
	 * is presumably still scanning the ring — NOTE(review): exact
	 * doorbell semantics depend on the get_cmd_mbox implementation.
	 */
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	/* Remember the two most recently submitted slots. */
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	/* Advance the ring pointer, wrapping at the end. */
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}
179
/*
 * myrb_exec_cmd - executes command block and waits for completion.
 *
 * Queues @cmd_blk through the controller's qcmd hook under
 * cb->queue_lock, then sleeps on an on-stack completion until
 * cmd_blk->completion is signalled (presumably by the interrupt
 * handler — not visible in this file section).  Must be called from
 * process context.
 *
 * Return: command status
 */
static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
		struct myrb_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	unsigned long flags;

	cmd_blk->completion = &cmpl;

	/* Submission into the mailbox ring must be serialized. */
	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	wait_for_completion(&cmpl);
	return cmd_blk->status;
}
200
201 /*
202  * myrb_exec_type3 - executes a type 3 command and waits for completion.
203  *
204  * Return: command status
205  */
206 static unsigned short myrb_exec_type3(struct myrb_hba *cb,
207                 enum myrb_cmd_opcode op, dma_addr_t addr)
208 {
209         struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
210         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
211         unsigned short status;
212
213         mutex_lock(&cb->dcmd_mutex);
214         myrb_reset_cmd(cmd_blk);
215         mbox->type3.id = MYRB_DCMD_TAG;
216         mbox->type3.opcode = op;
217         mbox->type3.addr = addr;
218         status = myrb_exec_cmd(cb, cmd_blk);
219         mutex_unlock(&cb->dcmd_mutex);
220         return status;
221 }
222
/*
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 *
 * Streaming-maps @pdev_info for the controller to fill in, runs a
 * type 3D command addressed at @sdev's channel/target under the
 * direct-command mutex, and unmaps the buffer afterwards.  Results of
 * the legacy GET_DEVICE_STATE_OLD opcode are converted to the current
 * layout via myrb_translate_devstate().
 *
 * Return: command status, or MYRB_STATUS_SUBSYS_FAILED if the DMA
 * mapping fails.
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	/* Unmap before reading the controller-written data on the CPU. */
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	/* Old-style opcode returns the legacy layout; translate in place. */
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}
260
/*
 * Messages for controller "kill drive" events, indexed by the ASCQ of
 * the vendor-specific sense data (see myrb_get_event()).  Read-only,
 * hence const-qualified.
 */
static const char * const myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};
276
/**
 * myrb_get_event - get event log from HBA
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Execute a type 3E command and logs the event message.  The event's
 * sense data is decoded; vendor-specific sense with ASC 0x80 selects a
 * message from myrb_event_msg[], anything else is dumped raw.
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);

	else if (ev_buf->seq_num == event) {
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		/*
		 * NOTE(review): sense length is hard-coded to 32 here;
		 * confirm it matches sizeof(ev_buf->sense) in myrb.h.
		 */
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}
334
/*
 * myrb_get_errtable - retrieves the error table from the controller
 *
 * Executes a type 3 command and logs the error table from the
 * controller.  A snapshot of the previous table is kept so that only
 * devices whose error counters actually changed are reported.
 *
 * NOTE(review): the snapshot lives on the stack and is
 * MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS entries large — confirm this
 * stays well within kernel stack limits for the entry sizes in myrb.h.
 */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			/* Only physical devices have error table entries. */
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
				+ sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			/* Report only devices whose counters changed. */
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}
379
/*
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 *
 * Executes a type 3 command and updates the logical device table.
 * Logical drives that appear in the new table but have no scsi_device
 * yet are hot-added (unless offline); state and write-back changes on
 * existing devices are logged, then the cached per-device info is
 * refreshed.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			/* Never announce drives that are already offline. */
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		/* sdev->hostdata caches the last-seen ldev info. */
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		memcpy(old, new, sizeof(*new));
		/* Drop the reference taken by scsi_device_lookup(). */
		scsi_device_put(sdev);
	}
	return status;
}
429
430 /*
431  * myrb_get_rbld_progress - get rebuild progress information
432  *
433  * Executes a type 3 command and returns the rebuild progress
434  * information.
435  *
436  * Return: command status
437  */
438 static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
439                 struct myrb_rbld_progress *rbld)
440 {
441         struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
442         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
443         struct myrb_rbld_progress *rbld_buf;
444         dma_addr_t rbld_addr;
445         unsigned short status;
446
447         rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
448                                       sizeof(struct myrb_rbld_progress),
449                                       &rbld_addr, GFP_KERNEL);
450         if (!rbld_buf)
451                 return MYRB_STATUS_RBLD_NOT_CHECKED;
452
453         myrb_reset_cmd(cmd_blk);
454         mbox->type3.id = MYRB_MCMD_TAG;
455         mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
456         mbox->type3.addr = rbld_addr;
457         status = myrb_exec_cmd(cb, cmd_blk);
458         if (rbld)
459                 memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
460         dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
461                           rbld_buf, rbld_addr);
462         return status;
463 }
464
/*
 * myrb_update_rbld_progress - updates the rebuild status
 *
 * Updates the rebuild status for the attached logical devices.  A
 * transition from "rebuild running" to "nothing in progress" is
 * reported as a successful completion.
 *
 * NOTE(review): if myrb_get_rbld_progress() fails to allocate its DMA
 * buffer it returns MYRB_STATUS_RBLD_NOT_CHECKED without filling
 * rbld_buf, yet the branch below still reads rbld_buf's fields —
 * confirm whether that path matters in practice.
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	/* Rebuild just finished: last poll saw success, now idle. */
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		/*
		 * NOTE(review): returning here skips the
		 * cb->last_rbld_status update below — verify intended.
		 */
		if (!sdev)
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				     "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	cb->last_rbld_status = status;
}
524
/*
 * myrb_get_cc_progress - retrieve the rebuild status
 *
 * Execute a type 3 Command and fetch the rebuild / consistency check
 * status.  On allocation failure the check is deferred by setting
 * cb->need_cc_status so the monitor retries later.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		/* Retry on the next monitor pass. */
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			/* >> 7 scales both terms to avoid 32-bit overflow. */
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}
572
/*
 * myrb_bgi_control - updates background initialisation status
 *
 * Executes a type 3B command and updates the background initialisation
 * status.  Transitions relative to the cached cb->bgi_status are
 * reported via sdev_printk; the cache is refreshed or invalidated
 * depending on the returned command status.
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	/* NOTE(review): 0x20 is presumably "query status" — confirm. */
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	/*
	 * Looked up unconditionally; on failure bgi->ldev_num comes from
	 * the (zero-initialized) coherent buffer rather than the HW.
	 */
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			/* Suppress duplicate progress reports. */
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				 "Background Initialization in Progress: %d%% completed\n",
				 (100 * (bgi->blocks_done >> 7))
				 / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		fallthrough;
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}
661
/*
 * myrb_hba_enquiry - updates the controller status
 *
 * Executes a DAC_V1_Enquiry command and updates the controller status.
 * The previous enquiry data is snapshotted first so that every change
 * (drive count, deferred-write flag, event sequence, rebuild /
 * consistency-check state) can be detected and the corresponding
 * monitor flags (need_err_info, need_ldev_info, need_rbld,
 * need_cc_status, need_bgi_status) raised.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	/* Snapshot the previous enquiry for change detection. */
	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	/* Report logical drives that (dis)appeared since the last poll. */
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	/* New events in the log: schedule error-info retrieval. */
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	/* Any change in critical/offline/total counts: refresh ldev info. */
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	/* Dead drives or secondary-monitor interval elapsed: BGI poll. */
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	/* A rebuild running now or at the last poll: track its progress. */
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	/* Consistency check was running: report how it ended (or didn't). */
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}
773
774 /*
775  * myrb_set_pdev_state - sets the device state for a physical device
776  *
777  * Return: command status
778  */
779 static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
780                 struct scsi_device *sdev, enum myrb_devstate state)
781 {
782         struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
783         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
784         unsigned short status;
785
786         mutex_lock(&cb->dcmd_mutex);
787         mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
788         mbox->type3D.id = MYRB_DCMD_TAG;
789         mbox->type3D.channel = sdev->channel;
790         mbox->type3D.target = sdev->id;
791         mbox->type3D.state = state & 0x1F;
792         status = myrb_exec_cmd(cb, cmd_blk);
793         mutex_unlock(&cb->dcmd_mutex);
794
795         return status;
796 }
797
798 /*
799  * myrb_enable_mmio - enables the Memory Mailbox Interface
800  *
801  * PD and P controller types have no memory mailbox, but still need the
802  * other dma mapped memory.
803  *
804  * Return: true on success, false otherwise.
805  */
806 static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
807 {
808         void __iomem *base = cb->io_base;
809         struct pci_dev *pdev = cb->pdev;
810         size_t err_table_size;
811         size_t ldev_info_size;
812         union myrb_cmd_mbox *cmd_mbox_mem;
813         struct myrb_stat_mbox *stat_mbox_mem;
814         union myrb_cmd_mbox mbox;
815         unsigned short status;
816
817         memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
818
819         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
820                 dev_err(&pdev->dev, "DMA mask out of range\n");
821                 return false;
822         }
823
824         cb->enquiry = dma_alloc_coherent(&pdev->dev,
825                                          sizeof(struct myrb_enquiry),
826                                          &cb->enquiry_addr, GFP_KERNEL);
827         if (!cb->enquiry)
828                 return false;
829
830         err_table_size = sizeof(struct myrb_error_entry) *
831                 MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
832         cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
833                                            &cb->err_table_addr, GFP_KERNEL);
834         if (!cb->err_table)
835                 return false;
836
837         ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
838         cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
839                                                &cb->ldev_info_addr, GFP_KERNEL);
840         if (!cb->ldev_info_buf)
841                 return false;
842
843         /*
844          * Skip mailbox initialisation for PD and P Controllers
845          */
846         if (!mmio_init_fn)
847                 return true;
848
849         /* These are the base addresses for the command memory mailbox array */
850         cb->cmd_mbox_size =  MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
851         cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
852                                                 cb->cmd_mbox_size,
853                                                 &cb->cmd_mbox_addr,
854                                                 GFP_KERNEL);
855         if (!cb->first_cmd_mbox)
856                 return false;
857
858         cmd_mbox_mem = cb->first_cmd_mbox;
859         cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
860         cb->last_cmd_mbox = cmd_mbox_mem;
861         cb->next_cmd_mbox = cb->first_cmd_mbox;
862         cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
863         cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
864
865         /* These are the base addresses for the status memory mailbox array */
866         cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
867             sizeof(struct myrb_stat_mbox);
868         cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
869                                                  cb->stat_mbox_size,
870                                                  &cb->stat_mbox_addr,
871                                                  GFP_KERNEL);
872         if (!cb->first_stat_mbox)
873                 return false;
874
875         stat_mbox_mem = cb->first_stat_mbox;
876         stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
877         cb->last_stat_mbox = stat_mbox_mem;
878         cb->next_stat_mbox = cb->first_stat_mbox;
879
880         /* Enable the Memory Mailbox Interface. */
881         cb->dual_mode_interface = true;
882         mbox.typeX.opcode = 0x2B;
883         mbox.typeX.id = 0;
884         mbox.typeX.opcode2 = 0x14;
885         mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
886         mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
887
888         status = mmio_init_fn(pdev, base, &mbox);
889         if (status != MYRB_STATUS_SUCCESS) {
890                 cb->dual_mode_interface = false;
891                 mbox.typeX.opcode2 = 0x10;
892                 status = mmio_init_fn(pdev, base, &mbox);
893                 if (status != MYRB_STATUS_SUCCESS) {
894                         dev_err(&pdev->dev,
895                                 "Failed to enable mailbox, statux %02X\n",
896                                 status);
897                         return false;
898                 }
899         }
900         return true;
901 }
902
903 /*
904  * myrb_get_hba_config - reads the configuration information
905  *
906  * Reads the configuration information from the controller and
907  * initializes the controller structure.
908  *
909  * Return: 0 on success, errno otherwise
910  */
911 static int myrb_get_hba_config(struct myrb_hba *cb)
912 {
913         struct myrb_enquiry2 *enquiry2;
914         dma_addr_t enquiry2_addr;
915         struct myrb_config2 *config2;
916         dma_addr_t config2_addr;
917         struct Scsi_Host *shost = cb->host;
918         struct pci_dev *pdev = cb->pdev;
919         int pchan_max = 0, pchan_cur = 0;
920         unsigned short status;
921         int ret = -ENODEV, memsize = 0;
922
923         enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
924                                       &enquiry2_addr, GFP_KERNEL);
925         if (!enquiry2) {
926                 shost_printk(KERN_ERR, cb->host,
927                              "Failed to allocate V1 enquiry2 memory\n");
928                 return -ENOMEM;
929         }
930         config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
931                                      &config2_addr, GFP_KERNEL);
932         if (!config2) {
933                 shost_printk(KERN_ERR, cb->host,
934                              "Failed to allocate V1 config2 memory\n");
935                 dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
936                                   enquiry2, enquiry2_addr);
937                 return -ENOMEM;
938         }
939         mutex_lock(&cb->dma_mutex);
940         status = myrb_hba_enquiry(cb);
941         mutex_unlock(&cb->dma_mutex);
942         if (status != MYRB_STATUS_SUCCESS) {
943                 shost_printk(KERN_WARNING, cb->host,
944                              "Failed it issue V1 Enquiry\n");
945                 goto out_free;
946         }
947
948         status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
949         if (status != MYRB_STATUS_SUCCESS) {
950                 shost_printk(KERN_WARNING, cb->host,
951                              "Failed to issue V1 Enquiry2\n");
952                 goto out_free;
953         }
954
955         status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
956         if (status != MYRB_STATUS_SUCCESS) {
957                 shost_printk(KERN_WARNING, cb->host,
958                              "Failed to issue ReadConfig2\n");
959                 goto out_free;
960         }
961
962         status = myrb_get_ldev_info(cb);
963         if (status != MYRB_STATUS_SUCCESS) {
964                 shost_printk(KERN_WARNING, cb->host,
965                              "Failed to get logical drive information\n");
966                 goto out_free;
967         }
968
969         /*
970          * Initialize the Controller Model Name and Full Model Name fields.
971          */
972         switch (enquiry2->hw.sub_model) {
973         case DAC960_V1_P_PD_PU:
974                 if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
975                         strcpy(cb->model_name, "DAC960PU");
976                 else
977                         strcpy(cb->model_name, "DAC960PD");
978                 break;
979         case DAC960_V1_PL:
980                 strcpy(cb->model_name, "DAC960PL");
981                 break;
982         case DAC960_V1_PG:
983                 strcpy(cb->model_name, "DAC960PG");
984                 break;
985         case DAC960_V1_PJ:
986                 strcpy(cb->model_name, "DAC960PJ");
987                 break;
988         case DAC960_V1_PR:
989                 strcpy(cb->model_name, "DAC960PR");
990                 break;
991         case DAC960_V1_PT:
992                 strcpy(cb->model_name, "DAC960PT");
993                 break;
994         case DAC960_V1_PTL0:
995                 strcpy(cb->model_name, "DAC960PTL0");
996                 break;
997         case DAC960_V1_PRL:
998                 strcpy(cb->model_name, "DAC960PRL");
999                 break;
1000         case DAC960_V1_PTL1:
1001                 strcpy(cb->model_name, "DAC960PTL1");
1002                 break;
1003         case DAC960_V1_1164P:
1004                 strcpy(cb->model_name, "eXtremeRAID 1100");
1005                 break;
1006         default:
1007                 shost_printk(KERN_WARNING, cb->host,
1008                              "Unknown Model %X\n",
1009                              enquiry2->hw.sub_model);
1010                 goto out;
1011         }
1012         /*
1013          * Initialize the Controller Firmware Version field and verify that it
1014          * is a supported firmware version.
1015          * The supported firmware versions are:
1016          *
1017          * DAC1164P                 5.06 and above
1018          * DAC960PTL/PRL/PJ/PG      4.06 and above
1019          * DAC960PU/PD/PL           3.51 and above
1020          * DAC960PU/PD/PL/P         2.73 and above
1021          */
1022 #if defined(CONFIG_ALPHA)
1023         /*
1024          * DEC Alpha machines were often equipped with DAC960 cards that were
1025          * OEMed from Mylex, and had their own custom firmware. Version 2.70,
1026          * the last custom FW revision to be released by DEC for these older
1027          * controllers, appears to work quite well with this driver.
1028          *
1029          * Cards tested successfully were several versions each of the PD and
1030          * PU, called by DEC the KZPSC and KZPAC, respectively, and having
1031          * the Manufacturer Numbers (from Mylex), usually on a sticker on the
1032          * back of the board, of:
1033          *
1034          * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
1035          *         or D040349 (3-channel)
1036          * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
1037          *         or D040397 (3-channel)
1038          */
1039 # define FIRMWARE_27X   "2.70"
1040 #else
1041 # define FIRMWARE_27X   "2.73"
1042 #endif
1043
1044         if (enquiry2->fw.major_version == 0) {
1045                 enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1046                 enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1047                 enquiry2->fw.firmware_type = '0';
1048                 enquiry2->fw.turn_id = 0;
1049         }
1050         snprintf(cb->fw_version, sizeof(cb->fw_version),
1051                 "%u.%02u-%c-%02u",
1052                 enquiry2->fw.major_version,
1053                 enquiry2->fw.minor_version,
1054                 enquiry2->fw.firmware_type,
1055                 enquiry2->fw.turn_id);
1056         if (!((enquiry2->fw.major_version == 5 &&
1057                enquiry2->fw.minor_version >= 6) ||
1058               (enquiry2->fw.major_version == 4 &&
1059                enquiry2->fw.minor_version >= 6) ||
1060               (enquiry2->fw.major_version == 3 &&
1061                enquiry2->fw.minor_version >= 51) ||
1062               (enquiry2->fw.major_version == 2 &&
1063                strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1064                 shost_printk(KERN_WARNING, cb->host,
1065                         "Firmware Version '%s' unsupported\n",
1066                         cb->fw_version);
1067                 goto out;
1068         }
1069         /*
1070          * Initialize the Channels, Targets, Memory Size, and SAF-TE
1071          * Enclosure Management Enabled fields.
1072          */
1073         switch (enquiry2->hw.model) {
1074         case MYRB_5_CHANNEL_BOARD:
1075                 pchan_max = 5;
1076                 break;
1077         case MYRB_3_CHANNEL_BOARD:
1078         case MYRB_3_CHANNEL_ASIC_DAC:
1079                 pchan_max = 3;
1080                 break;
1081         case MYRB_2_CHANNEL_BOARD:
1082                 pchan_max = 2;
1083                 break;
1084         default:
1085                 pchan_max = enquiry2->cfg_chan;
1086                 break;
1087         }
1088         pchan_cur = enquiry2->cur_chan;
1089         if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
1090                 cb->bus_width = 32;
1091         else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
1092                 cb->bus_width = 16;
1093         else
1094                 cb->bus_width = 8;
1095         cb->ldev_block_size = enquiry2->ldev_block_size;
1096         shost->max_channel = pchan_cur;
1097         shost->max_id = enquiry2->max_targets;
1098         memsize = enquiry2->mem_size >> 20;
1099         cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1100         /*
1101          * Initialize the Controller Queue Depth, Driver Queue Depth,
1102          * Logical Drive Count, Maximum Blocks per Command, Controller
1103          * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
1104          * The Driver Queue Depth must be at most one less than the
1105          * Controller Queue Depth to allow for an automatic drive
1106          * rebuild operation.
1107          */
1108         shost->can_queue = cb->enquiry->max_tcq;
1109         if (shost->can_queue < 3)
1110                 shost->can_queue = enquiry2->max_cmds;
1111         if (shost->can_queue < 3)
1112                 /* Play safe and disable TCQ */
1113                 shost->can_queue = 1;
1114
1115         if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
1116                 shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
1117         shost->max_sectors = enquiry2->max_sectors;
1118         shost->sg_tablesize = enquiry2->max_sge;
1119         if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
1120                 shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
1121         /*
1122          * Initialize the Stripe Size, Segment Size, and Geometry Translation.
1123          */
1124         cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1125                 >> (10 - MYRB_BLKSIZE_BITS);
1126         cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1127                 >> (10 - MYRB_BLKSIZE_BITS);
1128         /* Assume 255/63 translation */
1129         cb->ldev_geom_heads = 255;
1130         cb->ldev_geom_sectors = 63;
1131         if (config2->drive_geometry) {
1132                 cb->ldev_geom_heads = 128;
1133                 cb->ldev_geom_sectors = 32;
1134         }
1135
1136         /*
1137          * Initialize the Background Initialization Status.
1138          */
1139         if ((cb->fw_version[0] == '4' &&
1140              strcmp(cb->fw_version, "4.08") >= 0) ||
1141             (cb->fw_version[0] == '5' &&
1142              strcmp(cb->fw_version, "5.08") >= 0)) {
1143                 cb->bgi_status_supported = true;
1144                 myrb_bgi_control(cb);
1145         }
1146         cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1147         ret = 0;
1148
1149 out:
1150         shost_printk(KERN_INFO, cb->host,
1151                 "Configuring %s PCI RAID Controller\n", cb->model_name);
1152         shost_printk(KERN_INFO, cb->host,
1153                 "  Firmware Version: %s, Memory Size: %dMB\n",
1154                 cb->fw_version, memsize);
1155         if (cb->io_addr == 0)
1156                 shost_printk(KERN_INFO, cb->host,
1157                         "  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
1158                         (unsigned long)cb->pci_addr, cb->irq);
1159         else
1160                 shost_printk(KERN_INFO, cb->host,
1161                         "  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
1162                         (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1163                         cb->irq);
1164         shost_printk(KERN_INFO, cb->host,
1165                 "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
1166                 cb->host->can_queue, cb->host->max_sectors);
1167         shost_printk(KERN_INFO, cb->host,
1168                      "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
1169                      cb->host->can_queue, cb->host->sg_tablesize,
1170                      MYRB_SCATTER_GATHER_LIMIT);
1171         shost_printk(KERN_INFO, cb->host,
1172                      "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
1173                      cb->stripe_size, cb->segment_size,
1174                      cb->ldev_geom_heads, cb->ldev_geom_sectors,
1175                      cb->safte_enabled ?
1176                      "  SAF-TE Enclosure Management Enabled" : "");
1177         shost_printk(KERN_INFO, cb->host,
1178                      "  Physical: %d/%d channels %d/%d/%d devices\n",
1179                      pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1180                      cb->host->max_id);
1181
1182         shost_printk(KERN_INFO, cb->host,
1183                      "  Logical: 1/1 channels, %d/%d disks\n",
1184                      cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
1185
1186 out_free:
1187         dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
1188                           enquiry2, enquiry2_addr);
1189         dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
1190                           config2, config2_addr);
1191
1192         return ret;
1193 }
1194
1195 /*
1196  * myrb_unmap - unmaps controller structures
1197  */
1198 static void myrb_unmap(struct myrb_hba *cb)
1199 {
1200         if (cb->ldev_info_buf) {
1201                 size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
1202                         MYRB_MAX_LDEVS;
1203                 dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1204                                   cb->ldev_info_buf, cb->ldev_info_addr);
1205                 cb->ldev_info_buf = NULL;
1206         }
1207         if (cb->err_table) {
1208                 size_t err_table_size = sizeof(struct myrb_error_entry) *
1209                         MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
1210                 dma_free_coherent(&cb->pdev->dev, err_table_size,
1211                                   cb->err_table, cb->err_table_addr);
1212                 cb->err_table = NULL;
1213         }
1214         if (cb->enquiry) {
1215                 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1216                                   cb->enquiry, cb->enquiry_addr);
1217                 cb->enquiry = NULL;
1218         }
1219         if (cb->first_stat_mbox) {
1220                 dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1221                                   cb->first_stat_mbox, cb->stat_mbox_addr);
1222                 cb->first_stat_mbox = NULL;
1223         }
1224         if (cb->first_cmd_mbox) {
1225                 dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1226                                   cb->first_cmd_mbox, cb->cmd_mbox_addr);
1227                 cb->first_cmd_mbox = NULL;
1228         }
1229 }
1230
1231 /*
1232  * myrb_cleanup - cleanup controller structures
1233  */
1234 static void myrb_cleanup(struct myrb_hba *cb)
1235 {
1236         struct pci_dev *pdev = cb->pdev;
1237
1238         /* Free the memory mailbox, status, and related structures */
1239         myrb_unmap(cb);
1240
1241         if (cb->mmio_base) {
1242                 if (cb->disable_intr)
1243                         cb->disable_intr(cb->io_base);
1244                 iounmap(cb->mmio_base);
1245         }
1246         if (cb->irq)
1247                 free_irq(cb->irq, cb);
1248         if (cb->io_addr)
1249                 release_region(cb->io_addr, 0x80);
1250         pci_set_drvdata(pdev, NULL);
1251         pci_disable_device(pdev);
1252         scsi_host_put(cb->host);
1253 }
1254
1255 static int myrb_host_reset(struct scsi_cmnd *scmd)
1256 {
1257         struct Scsi_Host *shost = scmd->device->host;
1258         struct myrb_hba *cb = shost_priv(shost);
1259
1260         cb->reset(cb->io_base);
1261         return SUCCESS;
1262 }
1263
1264 static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
1265                 struct scsi_cmnd *scmd)
1266 {
1267         struct request *rq = scsi_cmd_to_rq(scmd);
1268         struct myrb_hba *cb = shost_priv(shost);
1269         struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1270         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1271         struct myrb_dcdb *dcdb;
1272         dma_addr_t dcdb_addr;
1273         struct scsi_device *sdev = scmd->device;
1274         struct scatterlist *sgl;
1275         unsigned long flags;
1276         int nsge;
1277
1278         myrb_reset_cmd(cmd_blk);
1279         dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
1280         if (!dcdb)
1281                 return SCSI_MLQUEUE_HOST_BUSY;
1282         nsge = scsi_dma_map(scmd);
1283         if (nsge > 1) {
1284                 dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
1285                 scmd->result = (DID_ERROR << 16);
1286                 scmd->scsi_done(scmd);
1287                 return 0;
1288         }
1289
1290         mbox->type3.opcode = MYRB_CMD_DCDB;
1291         mbox->type3.id = rq->tag + 3;
1292         mbox->type3.addr = dcdb_addr;
1293         dcdb->channel = sdev->channel;
1294         dcdb->target = sdev->id;
1295         switch (scmd->sc_data_direction) {
1296         case DMA_NONE:
1297                 dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
1298                 break;
1299         case DMA_TO_DEVICE:
1300                 dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
1301                 break;
1302         case DMA_FROM_DEVICE:
1303                 dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
1304                 break;
1305         default:
1306                 dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
1307                 break;
1308         }
1309         dcdb->early_status = false;
1310         if (rq->timeout <= 10)
1311                 dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
1312         else if (rq->timeout <= 60)
1313                 dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
1314         else if (rq->timeout <= 600)
1315                 dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
1316         else
1317                 dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
1318         dcdb->no_autosense = false;
1319         dcdb->allow_disconnect = true;
1320         sgl = scsi_sglist(scmd);
1321         dcdb->dma_addr = sg_dma_address(sgl);
1322         if (sg_dma_len(sgl) > USHRT_MAX) {
1323                 dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
1324                 dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
1325         } else {
1326                 dcdb->xfer_len_lo = sg_dma_len(sgl);
1327                 dcdb->xfer_len_hi4 = 0;
1328         }
1329         dcdb->cdb_len = scmd->cmd_len;
1330         dcdb->sense_len = sizeof(dcdb->sense);
1331         memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
1332
1333         spin_lock_irqsave(&cb->queue_lock, flags);
1334         cb->qcmd(cb, cmd_blk);
1335         spin_unlock_irqrestore(&cb->queue_lock, flags);
1336         return 0;
1337 }
1338
1339 static void myrb_inquiry(struct myrb_hba *cb,
1340                 struct scsi_cmnd *scmd)
1341 {
1342         unsigned char inq[36] = {
1343                 0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
1344                 0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
1345                 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1346                 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1347                 0x20, 0x20, 0x20, 0x20,
1348         };
1349
1350         if (cb->bus_width > 16)
1351                 inq[7] |= 1 << 6;
1352         if (cb->bus_width > 8)
1353                 inq[7] |= 1 << 5;
1354         memcpy(&inq[16], cb->model_name, 16);
1355         memcpy(&inq[32], cb->fw_version, 1);
1356         memcpy(&inq[33], &cb->fw_version[2], 2);
1357         memcpy(&inq[35], &cb->fw_version[7], 1);
1358
1359         scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
1360 }
1361
1362 static void
1363 myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1364                 struct myrb_ldev_info *ldev_info)
1365 {
1366         unsigned char modes[32], *mode_pg;
1367         bool dbd;
1368         size_t mode_len;
1369
1370         dbd = (scmd->cmnd[1] & 0x08) == 0x08;
1371         if (dbd) {
1372                 mode_len = 24;
1373                 mode_pg = &modes[4];
1374         } else {
1375                 mode_len = 32;
1376                 mode_pg = &modes[12];
1377         }
1378         memset(modes, 0, sizeof(modes));
1379         modes[0] = mode_len - 1;
1380         if (!dbd) {
1381                 unsigned char *block_desc = &modes[4];
1382
1383                 modes[3] = 8;
1384                 put_unaligned_be32(ldev_info->size, &block_desc[0]);
1385                 put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
1386         }
1387         mode_pg[0] = 0x08;
1388         mode_pg[1] = 0x12;
1389         if (ldev_info->wb_enabled)
1390                 mode_pg[2] |= 0x04;
1391         if (cb->segment_size) {
1392                 mode_pg[2] |= 0x08;
1393                 put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1394         }
1395
1396         scsi_sg_copy_from_buffer(scmd, modes, mode_len);
1397 }
1398
/*
 * myrb_request_sense - emulates REQUEST SENSE for a logical drive
 *
 * Fabricates a "NO SENSE" response and copies it into the command's
 * data buffer; logical drives have no real sense data to report.
 */
static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}
1406
1407 static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1408                 struct myrb_ldev_info *ldev_info)
1409 {
1410         unsigned char data[8];
1411
1412         dev_dbg(&scmd->device->sdev_gendev,
1413                 "Capacity %u, blocksize %u\n",
1414                 ldev_info->size, cb->ldev_block_size);
1415         put_unaligned_be32(ldev_info->size - 1, &data[0]);
1416         put_unaligned_be32(cb->ldev_block_size, &data[4]);
1417         scsi_sg_copy_from_buffer(scmd, data, 8);
1418 }
1419
1420 static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1421                 struct scsi_cmnd *scmd)
1422 {
1423         struct myrb_hba *cb = shost_priv(shost);
1424         struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1425         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1426         struct myrb_ldev_info *ldev_info;
1427         struct scsi_device *sdev = scmd->device;
1428         struct scatterlist *sgl;
1429         unsigned long flags;
1430         u64 lba;
1431         u32 block_cnt;
1432         int nsge;
1433
1434         ldev_info = sdev->hostdata;
1435         if (ldev_info->state != MYRB_DEVICE_ONLINE &&
1436             ldev_info->state != MYRB_DEVICE_WO) {
1437                 dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
1438                         sdev->id, ldev_info ? ldev_info->state : 0xff);
1439                 scmd->result = (DID_BAD_TARGET << 16);
1440                 scmd->scsi_done(scmd);
1441                 return 0;
1442         }
1443         switch (scmd->cmnd[0]) {
1444         case TEST_UNIT_READY:
1445                 scmd->result = (DID_OK << 16);
1446                 scmd->scsi_done(scmd);
1447                 return 0;
1448         case INQUIRY:
1449                 if (scmd->cmnd[1] & 1) {
1450                         /* Illegal request, invalid field in CDB */
1451                         scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1452                 } else {
1453                         myrb_inquiry(cb, scmd);
1454                         scmd->result = (DID_OK << 16);
1455                 }
1456                 scmd->scsi_done(scmd);
1457                 return 0;
1458         case SYNCHRONIZE_CACHE:
1459                 scmd->result = (DID_OK << 16);
1460                 scmd->scsi_done(scmd);
1461                 return 0;
1462         case MODE_SENSE:
1463                 if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1464                     (scmd->cmnd[2] & 0x3F) != 0x08) {
1465                         /* Illegal request, invalid field in CDB */
1466                         scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1467                 } else {
1468                         myrb_mode_sense(cb, scmd, ldev_info);
1469                         scmd->result = (DID_OK << 16);
1470                 }
1471                 scmd->scsi_done(scmd);
1472                 return 0;
1473         case READ_CAPACITY:
1474                 if ((scmd->cmnd[1] & 1) ||
1475                     (scmd->cmnd[8] & 1)) {
1476                         /* Illegal request, invalid field in CDB */
1477                         scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1478                         scmd->scsi_done(scmd);
1479                         return 0;
1480                 }
1481                 lba = get_unaligned_be32(&scmd->cmnd[2]);
1482                 if (lba) {
1483                         /* Illegal request, invalid field in CDB */
1484                         scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1485                         scmd->scsi_done(scmd);
1486                         return 0;
1487                 }
1488                 myrb_read_capacity(cb, scmd, ldev_info);
1489                 scmd->scsi_done(scmd);
1490                 return 0;
1491         case REQUEST_SENSE:
1492                 myrb_request_sense(cb, scmd);
1493                 scmd->result = (DID_OK << 16);
1494                 return 0;
1495         case SEND_DIAGNOSTIC:
1496                 if (scmd->cmnd[1] != 0x04) {
1497                         /* Illegal request, invalid field in CDB */
1498                         scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1499                 } else {
1500                         /* Assume good status */
1501                         scmd->result = (DID_OK << 16);
1502                 }
1503                 scmd->scsi_done(scmd);
1504                 return 0;
1505         case READ_6:
1506                 if (ldev_info->state == MYRB_DEVICE_WO) {
1507                         /* Data protect, attempt to read invalid data */
1508                         scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1509                         scmd->scsi_done(scmd);
1510                         return 0;
1511                 }
1512                 fallthrough;
1513         case WRITE_6:
1514                 lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1515                        (scmd->cmnd[2] << 8) |
1516                        scmd->cmnd[3]);
1517                 block_cnt = scmd->cmnd[4];
1518                 break;
1519         case READ_10:
1520                 if (ldev_info->state == MYRB_DEVICE_WO) {
1521                         /* Data protect, attempt to read invalid data */
1522                         scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1523                         scmd->scsi_done(scmd);
1524                         return 0;
1525                 }
1526                 fallthrough;
1527         case WRITE_10:
1528         case VERIFY:            /* 0x2F */
1529         case WRITE_VERIFY:      /* 0x2E */
1530                 lba = get_unaligned_be32(&scmd->cmnd[2]);
1531                 block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
1532                 break;
1533         case READ_12:
1534                 if (ldev_info->state == MYRB_DEVICE_WO) {
1535                         /* Data protect, attempt to read invalid data */
1536                         scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
1537                         scmd->scsi_done(scmd);
1538                         return 0;
1539                 }
1540                 fallthrough;
1541         case WRITE_12:
1542         case VERIFY_12: /* 0xAF */
1543         case WRITE_VERIFY_12:   /* 0xAE */
1544                 lba = get_unaligned_be32(&scmd->cmnd[2]);
1545                 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1546                 break;
1547         default:
1548                 /* Illegal request, invalid opcode */
1549                 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
1550                 scmd->scsi_done(scmd);
1551                 return 0;
1552         }
1553
1554         myrb_reset_cmd(cmd_blk);
1555         mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
1556         if (scmd->sc_data_direction == DMA_NONE)
1557                 goto submit;
1558         nsge = scsi_dma_map(scmd);
1559         if (nsge == 1) {
1560                 sgl = scsi_sglist(scmd);
1561                 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1562                         mbox->type5.opcode = MYRB_CMD_READ;
1563                 else
1564                         mbox->type5.opcode = MYRB_CMD_WRITE;
1565
1566                 mbox->type5.ld.xfer_len = block_cnt;
1567                 mbox->type5.ld.ldev_num = sdev->id;
1568                 mbox->type5.lba = lba;
1569                 mbox->type5.addr = (u32)sg_dma_address(sgl);
1570         } else {
1571                 struct myrb_sge *hw_sgl;
1572                 dma_addr_t hw_sgl_addr;
1573                 int i;
1574
1575                 hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1576                 if (!hw_sgl)
1577                         return SCSI_MLQUEUE_HOST_BUSY;
1578
1579                 cmd_blk->sgl = hw_sgl;
1580                 cmd_blk->sgl_addr = hw_sgl_addr;
1581
1582                 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1583                         mbox->type5.opcode = MYRB_CMD_READ_SG;
1584                 else
1585                         mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1586
1587                 mbox->type5.ld.xfer_len = block_cnt;
1588                 mbox->type5.ld.ldev_num = sdev->id;
1589                 mbox->type5.lba = lba;
1590                 mbox->type5.addr = hw_sgl_addr;
1591                 mbox->type5.sg_count = nsge;
1592
1593                 scsi_for_each_sg(scmd, sgl, nsge, i) {
1594                         hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1595                         hw_sgl->sge_count = (u32)sg_dma_len(sgl);
1596                         hw_sgl++;
1597                 }
1598         }
1599 submit:
1600         spin_lock_irqsave(&cb->queue_lock, flags);
1601         cb->qcmd(cb, cmd_blk);
1602         spin_unlock_irqrestore(&cb->queue_lock, flags);
1603
1604         return 0;
1605 }
1606
1607 static int myrb_queuecommand(struct Scsi_Host *shost,
1608                 struct scsi_cmnd *scmd)
1609 {
1610         struct scsi_device *sdev = scmd->device;
1611
1612         if (sdev->channel > myrb_logical_channel(shost)) {
1613                 scmd->result = (DID_BAD_TARGET << 16);
1614                 scmd->scsi_done(scmd);
1615                 return 0;
1616         }
1617         if (sdev->channel == myrb_logical_channel(shost))
1618                 return myrb_ldev_queuecommand(shost, scmd);
1619
1620         return myrb_pthru_queuecommand(shost, scmd);
1621 }
1622
1623 static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1624 {
1625         struct myrb_hba *cb = shost_priv(sdev->host);
1626         struct myrb_ldev_info *ldev_info;
1627         unsigned short ldev_num = sdev->id;
1628         enum raid_level level;
1629
1630         ldev_info = cb->ldev_info_buf + ldev_num;
1631         if (!ldev_info)
1632                 return -ENXIO;
1633
1634         sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1635         if (!sdev->hostdata)
1636                 return -ENOMEM;
1637         dev_dbg(&sdev->sdev_gendev,
1638                 "slave alloc ldev %d state %x\n",
1639                 ldev_num, ldev_info->state);
1640         memcpy(sdev->hostdata, ldev_info,
1641                sizeof(*ldev_info));
1642         switch (ldev_info->raid_level) {
1643         case MYRB_RAID_LEVEL0:
1644                 level = RAID_LEVEL_LINEAR;
1645                 break;
1646         case MYRB_RAID_LEVEL1:
1647                 level = RAID_LEVEL_1;
1648                 break;
1649         case MYRB_RAID_LEVEL3:
1650                 level = RAID_LEVEL_3;
1651                 break;
1652         case MYRB_RAID_LEVEL5:
1653                 level = RAID_LEVEL_5;
1654                 break;
1655         case MYRB_RAID_LEVEL6:
1656                 level = RAID_LEVEL_6;
1657                 break;
1658         case MYRB_RAID_JBOD:
1659                 level = RAID_LEVEL_JBOD;
1660                 break;
1661         default:
1662                 level = RAID_LEVEL_UNKNOWN;
1663                 break;
1664         }
1665         raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
1666         return 0;
1667 }
1668
1669 static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1670 {
1671         struct myrb_hba *cb = shost_priv(sdev->host);
1672         struct myrb_pdev_state *pdev_info;
1673         unsigned short status;
1674
1675         if (sdev->id > MYRB_MAX_TARGETS)
1676                 return -ENXIO;
1677
1678         pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1679         if (!pdev_info)
1680                 return -ENOMEM;
1681
1682         status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1683                                   sdev, pdev_info);
1684         if (status != MYRB_STATUS_SUCCESS) {
1685                 dev_dbg(&sdev->sdev_gendev,
1686                         "Failed to get device state, status %x\n",
1687                         status);
1688                 kfree(pdev_info);
1689                 return -ENXIO;
1690         }
1691         if (!pdev_info->present) {
1692                 dev_dbg(&sdev->sdev_gendev,
1693                         "device not present, skip\n");
1694                 kfree(pdev_info);
1695                 return -ENXIO;
1696         }
1697         dev_dbg(&sdev->sdev_gendev,
1698                 "slave alloc pdev %d:%d state %x\n",
1699                 sdev->channel, sdev->id, pdev_info->state);
1700         sdev->hostdata = pdev_info;
1701
1702         return 0;
1703 }
1704
1705 static int myrb_slave_alloc(struct scsi_device *sdev)
1706 {
1707         if (sdev->channel > myrb_logical_channel(sdev->host))
1708                 return -ENXIO;
1709
1710         if (sdev->lun > 0)
1711                 return -ENXIO;
1712
1713         if (sdev->channel == myrb_logical_channel(sdev->host))
1714                 return myrb_ldev_slave_alloc(sdev);
1715
1716         return myrb_pdev_slave_alloc(sdev);
1717 }
1718
1719 static int myrb_slave_configure(struct scsi_device *sdev)
1720 {
1721         struct myrb_ldev_info *ldev_info;
1722
1723         if (sdev->channel > myrb_logical_channel(sdev->host))
1724                 return -ENXIO;
1725
1726         if (sdev->channel < myrb_logical_channel(sdev->host)) {
1727                 sdev->no_uld_attach = 1;
1728                 return 0;
1729         }
1730         if (sdev->lun != 0)
1731                 return -ENXIO;
1732
1733         ldev_info = sdev->hostdata;
1734         if (!ldev_info)
1735                 return -ENXIO;
1736         if (ldev_info->state != MYRB_DEVICE_ONLINE)
1737                 sdev_printk(KERN_INFO, sdev,
1738                             "Logical drive is %s\n",
1739                             myrb_devstate_name(ldev_info->state));
1740
1741         sdev->tagged_supported = 1;
1742         return 0;
1743 }
1744
/*
 * myrb_slave_destroy - free per-device data allocated in myrb_slave_alloc
 * (a myrb_ldev_info or myrb_pdev_state); kfree(NULL) is a harmless no-op.
 */
static void myrb_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
}
1749
1750 static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1751                 sector_t capacity, int geom[])
1752 {
1753         struct myrb_hba *cb = shost_priv(sdev->host);
1754
1755         geom[0] = cb->ldev_geom_heads;
1756         geom[1] = cb->ldev_geom_sectors;
1757         geom[2] = sector_div(capacity, geom[0] * geom[1]);
1758
1759         return 0;
1760 }
1761
/*
 * raid_state_show - sysfs 'raid_state' attribute (read)
 *
 * For a logical drive the state cached in sdev->hostdata is formatted;
 * for a physical device the state is refreshed from the controller via
 * MYRB_CMD_GET_DEVICE_STATE first.
 */
static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	int ret;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel == myrb_logical_channel(sdev->host)) {
		struct myrb_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		name = myrb_devstate_name(ldev_info->state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       ldev_info->state);
	} else {
		struct myrb_pdev_state *pdev_info = sdev->hostdata;
		unsigned short status;
		const char *name;

		/* Re-read the physical device state from the HBA. */
		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
					  sdev, pdev_info);
		if (status != MYRB_STATUS_SUCCESS)
			sdev_printk(KERN_INFO, sdev,
				    "Failed to get device state, status %x\n",
				    status);

		/* On a failed refresh the previously cached state is shown. */
		if (!pdev_info->present)
			name = "Removed";
		else
			name = myrb_devstate_name(pdev_info->state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       pdev_info->state);
	}
	return ret;
}
1806
/*
 * raid_state_store - sysfs 'raid_state' attribute (write)
 *
 * Accepts "kill"/"offline", "online" or "standby" and transitions the
 * physical device to the corresponding state via myrb_set_pdev_state().
 * Returns @count on success (or when the device is already in the
 * requested state), otherwise a negative errno mapped from the
 * controller status.
 */
static ssize_t raid_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	enum myrb_devstate new_state;
	unsigned short status;

	if (!strncmp(buf, "kill", 4) ||
	    !strncmp(buf, "offline", 7))
		new_state = MYRB_DEVICE_DEAD;
	else if (!strncmp(buf, "online", 6))
		new_state = MYRB_DEVICE_ONLINE;
	else if (!strncmp(buf, "standby", 7))
		new_state = MYRB_DEVICE_STANDBY;
	else
		return -EINVAL;

	pdev_info = sdev->hostdata;
	if (!pdev_info) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - no physical device information\n");
		return -ENXIO;
	}
	if (!pdev_info->present) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - device not present\n");
		return -ENXIO;
	}

	/* Nothing to do if the device is already in the requested state. */
	if (pdev_info->state == new_state)
		return count;

	status = myrb_set_pdev_state(cb, sdev, new_state);
	/* Map the controller status onto an errno for the writer. */
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		break;
	case MYRB_STATUS_START_DEVICE_FAILED:
		sdev_printk(KERN_INFO, sdev,
			     "Failed - Unable to Start Device\n");
		count = -EAGAIN;
		break;
	case MYRB_STATUS_NO_DEVICE:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - No Device at Address\n");
		count = -ENODEV;
		break;
	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
		sdev_printk(KERN_INFO, sdev,
			 "Failed - Invalid Channel or Target or Modifier\n");
		count = -EINVAL;
		break;
	case MYRB_STATUS_CHANNEL_BUSY:
		sdev_printk(KERN_INFO, sdev,
			 "Failed - Channel Busy\n");
		count = -EBUSY;
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			 "Failed - Unexpected Status %04X\n", status);
		count = -EIO;
		break;
	}
	return count;
}
static DEVICE_ATTR_RW(raid_state);
1874
1875 static ssize_t raid_level_show(struct device *dev,
1876                 struct device_attribute *attr, char *buf)
1877 {
1878         struct scsi_device *sdev = to_scsi_device(dev);
1879
1880         if (sdev->channel == myrb_logical_channel(sdev->host)) {
1881                 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1882                 const char *name;
1883
1884                 if (!ldev_info)
1885                         return -ENXIO;
1886
1887                 name = myrb_raidlevel_name(ldev_info->raid_level);
1888                 if (!name)
1889                         return snprintf(buf, 32, "Invalid (%02X)\n",
1890                                         ldev_info->state);
1891                 return snprintf(buf, 32, "%s\n", name);
1892         }
1893         return snprintf(buf, 32, "Physical Drive\n");
1894 }
1895 static DEVICE_ATTR_RO(raid_level);
1896
1897 static ssize_t rebuild_show(struct device *dev,
1898                 struct device_attribute *attr, char *buf)
1899 {
1900         struct scsi_device *sdev = to_scsi_device(dev);
1901         struct myrb_hba *cb = shost_priv(sdev->host);
1902         struct myrb_rbld_progress rbld_buf;
1903         unsigned char status;
1904
1905         if (sdev->channel < myrb_logical_channel(sdev->host))
1906                 return snprintf(buf, 32, "physical device - not rebuilding\n");
1907
1908         status = myrb_get_rbld_progress(cb, &rbld_buf);
1909
1910         if (rbld_buf.ldev_num != sdev->id ||
1911             status != MYRB_STATUS_SUCCESS)
1912                 return snprintf(buf, 32, "not rebuilding\n");
1913
1914         return snprintf(buf, 32, "rebuilding block %u of %u\n",
1915                         rbld_buf.ldev_size - rbld_buf.blocks_left,
1916                         rbld_buf.ldev_size);
1917 }
1918
/*
 * rebuild_store - sysfs 'rebuild' attribute (write)
 *
 * Writing a non-zero value starts an asynchronous rebuild of the
 * physical device at this channel/target; writing zero cancels a
 * rebuild that is in progress.  Direct controller commands are
 * serialized via cb->dcmd_mutex.  Returns @count on success or a
 * negative errno.
 */
static ssize_t rebuild_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_cmdblk *cmd_blk;
	union myrb_cmd_mbox *mbox;
	unsigned short status;
	int rc, start;
	const char *msg;

	rc = kstrtoint(buf, 0, &start);
	if (rc)
		return rc;

	/* Rebuild only makes sense for physical devices. */
	if (sdev->channel >= myrb_logical_channel(sdev->host))
		return -ENXIO;

	/* Probe whether a rebuild is already running on the controller. */
	status = myrb_get_rbld_progress(cb, NULL);
	if (start) {
		if (status == MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Initiated; already in progress\n");
			return -EALREADY;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
		mbox->type3D.id = MYRB_DCMD_TAG;
		mbox->type3D.channel = sdev->channel;
		mbox->type3D.target = sdev->id;
		status = myrb_exec_cmd(cb, cmd_blk);
		mutex_unlock(&cb->dcmd_mutex);
	} else {
		struct pci_dev *pdev = cb->pdev;
		unsigned char *rate;
		dma_addr_t rate_addr;

		if (status != MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Cancelled; not in progress\n");
			return 0;
		}

		/* The REBUILD_CONTROL command needs a DMA-able rate byte. */
		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
					  &rate_addr, GFP_KERNEL);
		if (rate == NULL) {
			sdev_printk(KERN_INFO, sdev,
				    "Cancellation of Rebuild Failed - Out of Memory\n");
			return -ENOMEM;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		/* rbld_rate 0xFF requests cancellation of the rebuild. */
		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
		mbox->type3R.id = MYRB_DCMD_TAG;
		mbox->type3R.rbld_rate = 0xFF;
		mbox->type3R.addr = rate_addr;
		status = myrb_exec_cmd(cb, cmd_blk);
		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
		mutex_unlock(&cb->dcmd_mutex);
	}
	if (status == MYRB_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
			    start ? "Initiated" : "Cancelled");
		return count;
	}
	if (!start) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Cancelled, status 0x%x\n",
			    status);
		return -EIO;
	}

	/* Translate known start-failure statuses into readable messages. */
	switch (status) {
	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
		msg = "Attempt to Rebuild Online or Unresponsive Drive";
		break;
	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
		msg = "New Disk Failed During Rebuild";
		break;
	case MYRB_STATUS_INVALID_ADDRESS:
		msg = "Invalid Device Address";
		break;
	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
		msg = "Already in Progress";
		break;
	default:
		msg = NULL;
		break;
	}
	if (msg)
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed - %s\n", msg);
	else
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed, status 0x%x\n", status);

	return -EIO;
}
static DEVICE_ATTR_RW(rebuild);
2023
2024 static ssize_t consistency_check_store(struct device *dev,
2025                 struct device_attribute *attr, const char *buf, size_t count)
2026 {
2027         struct scsi_device *sdev = to_scsi_device(dev);
2028         struct myrb_hba *cb = shost_priv(sdev->host);
2029         struct myrb_rbld_progress rbld_buf;
2030         struct myrb_cmdblk *cmd_blk;
2031         union myrb_cmd_mbox *mbox;
2032         unsigned short ldev_num = 0xFFFF;
2033         unsigned short status;
2034         int rc, start;
2035         const char *msg;
2036
2037         rc = kstrtoint(buf, 0, &start);
2038         if (rc)
2039                 return rc;
2040
2041         if (sdev->channel < myrb_logical_channel(sdev->host))
2042                 return -ENXIO;
2043
2044         status = myrb_get_rbld_progress(cb, &rbld_buf);
2045         if (start) {
2046                 if (status == MYRB_STATUS_SUCCESS) {
2047                         sdev_printk(KERN_INFO, sdev,
2048                                     "Check Consistency Not Initiated; already in progress\n");
2049                         return -EALREADY;
2050                 }
2051                 mutex_lock(&cb->dcmd_mutex);
2052                 cmd_blk = &cb->dcmd_blk;
2053                 myrb_reset_cmd(cmd_blk);
2054                 mbox = &cmd_blk->mbox;
2055                 mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2056                 mbox->type3C.id = MYRB_DCMD_TAG;
2057                 mbox->type3C.ldev_num = sdev->id;
2058                 mbox->type3C.auto_restore = true;
2059
2060                 status = myrb_exec_cmd(cb, cmd_blk);
2061                 mutex_unlock(&cb->dcmd_mutex);
2062         } else {
2063                 struct pci_dev *pdev = cb->pdev;
2064                 unsigned char *rate;
2065                 dma_addr_t rate_addr;
2066
2067                 if (ldev_num != sdev->id) {
2068                         sdev_printk(KERN_INFO, sdev,
2069                                     "Check Consistency Not Cancelled; not in progress\n");
2070                         return 0;
2071                 }
2072                 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2073                                           &rate_addr, GFP_KERNEL);
2074                 if (rate == NULL) {
2075                         sdev_printk(KERN_INFO, sdev,
2076                                     "Cancellation of Check Consistency Failed - Out of Memory\n");
2077                         return -ENOMEM;
2078                 }
2079                 mutex_lock(&cb->dcmd_mutex);
2080                 cmd_blk = &cb->dcmd_blk;
2081                 myrb_reset_cmd(cmd_blk);
2082                 mbox = &cmd_blk->mbox;
2083                 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2084                 mbox->type3R.id = MYRB_DCMD_TAG;
2085                 mbox->type3R.rbld_rate = 0xFF;
2086                 mbox->type3R.addr = rate_addr;
2087                 status = myrb_exec_cmd(cb, cmd_blk);
2088                 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2089                 mutex_unlock(&cb->dcmd_mutex);
2090         }
2091         if (status == MYRB_STATUS_SUCCESS) {
2092                 sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2093                             start ? "Initiated" : "Cancelled");
2094                 return count;
2095         }
2096         if (!start) {
2097                 sdev_printk(KERN_INFO, sdev,
2098                             "Check Consistency Not Cancelled, status 0x%x\n",
2099                             status);
2100                 return -EIO;
2101         }
2102
2103         switch (status) {
2104         case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2105                 msg = "Dependent Physical Device is DEAD";
2106                 break;
2107         case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2108                 msg = "New Disk Failed During Rebuild";
2109                 break;
2110         case MYRB_STATUS_INVALID_ADDRESS:
2111                 msg = "Invalid or Nonredundant Logical Drive";
2112                 break;
2113         case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2114                 msg = "Already in Progress";
2115                 break;
2116         default:
2117                 msg = NULL;
2118                 break;
2119         }
2120         if (msg)
2121                 sdev_printk(KERN_INFO, sdev,
2122                             "Check Consistency Failed - %s\n", msg);
2123         else
2124                 sdev_printk(KERN_INFO, sdev,
2125                             "Check Consistency Failed, status 0x%x\n", status);
2126
2127         return -EIO;
2128 }
2129
/*
 * consistency_check_show - sysfs 'consistency_check' attribute (read)
 *
 * The controller reports rebuild and consistency-check progress through
 * the same mechanism, so simply reuse rebuild_show().
 */
static ssize_t consistency_check_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return rebuild_show(dev, attr, buf);
}
static DEVICE_ATTR_RW(consistency_check);
2136
2137 static ssize_t ctlr_num_show(struct device *dev,
2138                 struct device_attribute *attr, char *buf)
2139 {
2140         struct Scsi_Host *shost = class_to_shost(dev);
2141         struct myrb_hba *cb = shost_priv(shost);
2142
2143         return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2144 }
2145 static DEVICE_ATTR_RO(ctlr_num);
2146
2147 static ssize_t firmware_show(struct device *dev,
2148                 struct device_attribute *attr, char *buf)
2149 {
2150         struct Scsi_Host *shost = class_to_shost(dev);
2151         struct myrb_hba *cb = shost_priv(shost);
2152
2153         return snprintf(buf, 16, "%s\n", cb->fw_version);
2154 }
2155 static DEVICE_ATTR_RO(firmware);
2156
2157 static ssize_t model_show(struct device *dev,
2158                 struct device_attribute *attr, char *buf)
2159 {
2160         struct Scsi_Host *shost = class_to_shost(dev);
2161         struct myrb_hba *cb = shost_priv(shost);
2162
2163         return snprintf(buf, 16, "%s\n", cb->model_name);
2164 }
2165 static DEVICE_ATTR_RO(model);
2166
2167 static ssize_t flush_cache_store(struct device *dev,
2168                 struct device_attribute *attr, const char *buf, size_t count)
2169 {
2170         struct Scsi_Host *shost = class_to_shost(dev);
2171         struct myrb_hba *cb = shost_priv(shost);
2172         unsigned short status;
2173
2174         status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2175         if (status == MYRB_STATUS_SUCCESS) {
2176                 shost_printk(KERN_INFO, shost,
2177                              "Cache Flush Completed\n");
2178                 return count;
2179         }
2180         shost_printk(KERN_INFO, shost,
2181                      "Cache Flush Failed, status %x\n", status);
2182         return -EIO;
2183 }
2184 static DEVICE_ATTR_WO(flush_cache);
2185
/* Per-device sysfs attributes, referenced from myrb_template.sdev_attrs. */
static struct device_attribute *myrb_sdev_attrs[] = {
	&dev_attr_rebuild,
	&dev_attr_consistency_check,
	&dev_attr_raid_state,
	&dev_attr_raid_level,
	NULL,
};
2193
/* Per-host sysfs attributes, referenced from myrb_template.shost_attrs. */
static struct device_attribute *myrb_shost_attrs[] = {
	&dev_attr_ctlr_num,
	&dev_attr_model,
	&dev_attr_firmware,
	&dev_attr_flush_cache,
	NULL,
};
2201
/* SCSI midlayer entry points and host parameters for myrb controllers. */
static struct scsi_host_template myrb_template = {
	.module                 = THIS_MODULE,
	.name                   = "DAC960",
	.proc_name              = "myrb",
	.queuecommand           = myrb_queuecommand,
	.eh_host_reset_handler  = myrb_host_reset,
	.slave_alloc            = myrb_slave_alloc,
	.slave_configure        = myrb_slave_configure,
	.slave_destroy          = myrb_slave_destroy,
	.bios_param             = myrb_biosparam,
	/* Per-command private data: one myrb_cmdblk per scsi_cmnd. */
	.cmd_size               = sizeof(struct myrb_cmdblk),
	.shost_attrs            = myrb_shost_attrs,
	.sdev_attrs             = myrb_sdev_attrs,
	.this_id                = -1,
};
2217
/**
 * myrb_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 *
 * Return: 1 if @dev sits on the logical-drive channel (i.e. is a RAID
 * volume), 0 for a physical device.
 */
static int myrb_is_raid(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return sdev->channel == myrb_logical_channel(sdev->host);
}
2228
2229 /**
2230  * myrb_get_resync - get raid volume resync percent complete
2231  * @dev: the device struct object
2232  */
2233 static void myrb_get_resync(struct device *dev)
2234 {
2235         struct scsi_device *sdev = to_scsi_device(dev);
2236         struct myrb_hba *cb = shost_priv(sdev->host);
2237         struct myrb_rbld_progress rbld_buf;
2238         unsigned int percent_complete = 0;
2239         unsigned short status;
2240         unsigned int ldev_size = 0, remaining = 0;
2241
2242         if (sdev->channel < myrb_logical_channel(sdev->host))
2243                 return;
2244         status = myrb_get_rbld_progress(cb, &rbld_buf);
2245         if (status == MYRB_STATUS_SUCCESS) {
2246                 if (rbld_buf.ldev_num == sdev->id) {
2247                         ldev_size = rbld_buf.ldev_size;
2248                         remaining = rbld_buf.blocks_left;
2249                 }
2250         }
2251         if (remaining && ldev_size)
2252                 percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2253         raid_set_resync(myrb_raid_template, dev, percent_complete);
2254 }
2255
2256 /**
2257  * myrb_get_state - get raid volume status
2258  * @dev: the device struct object
2259  */
2260 static void myrb_get_state(struct device *dev)
2261 {
2262         struct scsi_device *sdev = to_scsi_device(dev);
2263         struct myrb_hba *cb = shost_priv(sdev->host);
2264         struct myrb_ldev_info *ldev_info = sdev->hostdata;
2265         enum raid_state state = RAID_STATE_UNKNOWN;
2266         unsigned short status;
2267
2268         if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2269                 state = RAID_STATE_UNKNOWN;
2270         else {
2271                 status = myrb_get_rbld_progress(cb, NULL);
2272                 if (status == MYRB_STATUS_SUCCESS)
2273                         state = RAID_STATE_RESYNCING;
2274                 else {
2275                         switch (ldev_info->state) {
2276                         case MYRB_DEVICE_ONLINE:
2277                                 state = RAID_STATE_ACTIVE;
2278                                 break;
2279                         case MYRB_DEVICE_WO:
2280                         case MYRB_DEVICE_CRITICAL:
2281                                 state = RAID_STATE_DEGRADED;
2282                                 break;
2283                         default:
2284                                 state = RAID_STATE_OFFLINE;
2285                         }
2286                 }
2287         }
2288         raid_set_state(myrb_raid_template, dev, state);
2289 }
2290
/* raid_class hooks: expose RAID level, resync progress and state in sysfs */
static struct raid_function_template myrb_raid_functions = {
	.cookie		= &myrb_template,
	.is_raid	= myrb_is_raid,
	.get_resync	= myrb_get_resync,
	.get_state	= myrb_get_state,
};
2297
/*
 * myrb_handle_scsi - complete a SCSI command
 *
 * Unmaps the command's DMA buffers, frees the per-command DCDB and
 * scatter/gather pool allocations, translates the controller status
 * into a SCSI result (synthesizing sense data where the DAC960 status
 * maps onto a standard sense code), and completes @scmd.
 */
static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
		struct scsi_cmnd *scmd)
{
	unsigned short status;

	if (!cmd_blk)
		return;

	scsi_dma_unmap(scmd);

	if (cmd_blk->dcdb) {
		/*
		 * DCDB (passthrough) commands carry their own sense data;
		 * copy it out before the DCDB is returned to the pool.
		 */
		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
			      cmd_blk->dcdb_addr);
		cmd_blk->dcdb = NULL;
	}
	if (cmd_blk->sgl) {
		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
		cmd_blk->sgl = NULL;
		cmd_blk->sgl_addr = 0;
	}
	status = cmd_blk->status;
	switch (status) {
	case MYRB_STATUS_SUCCESS:
	case MYRB_STATUS_DEVICE_BUSY:
		/*
		 * The low byte of the result carries the controller status,
		 * which for these two cases matches the SAM status byte
		 * (GOOD / BUSY) — presumably intentional; verify against
		 * MYRB_STATUS_* definitions in myrb.h.
		 */
		scmd->result = (DID_OK << 16) | status;
		break;
	case MYRB_STATUS_BAD_DATA:
		dev_dbg(&scmd->device->sdev_gendev,
			"Bad Data Encountered\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
		else
			/* Write error */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
		break;
	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error, auto-reallocation failed */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
		else
			/* Write error, auto-reallocation failed */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
		break;
	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
		dev_dbg(&scmd->device->sdev_gendev,
			    "Logical Drive Nonexistent or Offline");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
		dev_dbg(&scmd->device->sdev_gendev,
			    "Attempt to Access Beyond End of Logical Drive");
		/* Logical block address out of range */
		scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0);
		break;
	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	default:
		scmd_printk(KERN_ERR, scmd,
			    "Unexpected Error Status %04X", status);
		scmd->result = (DID_ERROR << 16);
		break;
	}
	scmd->scsi_done(scmd);
}
2367
2368 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2369 {
2370         if (!cmd_blk)
2371                 return;
2372
2373         if (cmd_blk->completion) {
2374                 complete(cmd_blk->completion);
2375                 cmd_blk->completion = NULL;
2376         }
2377 }
2378
/*
 * myrb_monitor - periodic controller monitoring work
 *
 * Services at most one outstanding condition per invocation, in strict
 * priority order: pending event-log entries, error table, rebuild
 * progress (initial pass), logical drive info, rebuild progress,
 * consistency check progress, background initialization status.
 * When a condition was handled, the work is rescheduled after a short
 * 10-jiffy delay to drain the remaining conditions quickly; otherwise
 * a full enquiry is issued and the work re-arms at the normal
 * MYRB_PRIMARY_MONITOR_INTERVAL (or immediately, if the enquiry raised
 * new conditions).
 */
static void myrb_monitor(struct work_struct *work)
{
	struct myrb_hba *cb = container_of(work,
			struct myrb_hba, monitor_work.work);
	struct Scsi_Host *shost = cb->host;
	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;

	dev_dbg(&shost->shost_gendev, "monitor tick\n");

	if (cb->new_ev_seq > cb->old_ev_seq) {
		/* Fetch one event per tick until old_ev_seq catches up */
		int event = cb->old_ev_seq;

		dev_dbg(&shost->shost_gendev,
			"get event log no %d/%d\n",
			cb->new_ev_seq, event);
		myrb_get_event(cb, event);
		cb->old_ev_seq = event + 1;
		interval = 10;
	} else if (cb->need_err_info) {
		cb->need_err_info = false;
		dev_dbg(&shost->shost_gendev, "get error table\n");
		myrb_get_errtable(cb);
		interval = 10;
	} else if (cb->need_rbld && cb->rbld_first) {
		/* Rebuild progress takes precedence over ldev info on
		 * the first pass after a rebuild was detected. */
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_ldev_info) {
		cb->need_ldev_info = false;
		dev_dbg(&shost->shost_gendev,
			"get logical drive info\n");
		myrb_get_ldev_info(cb);
		interval = 10;
	} else if (cb->need_rbld) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_cc_status) {
		cb->need_cc_status = false;
		dev_dbg(&shost->shost_gendev,
			"get consistency check progress\n");
		myrb_get_cc_progress(cb);
		interval = 10;
	} else if (cb->need_bgi_status) {
		cb->need_bgi_status = false;
		dev_dbg(&shost->shost_gendev, "get background init status\n");
		myrb_bgi_control(cb);
		interval = 10;
	} else {
		/* Nothing pending: run a fresh enquiry under the DMA mutex */
		dev_dbg(&shost->shost_gendev, "new enquiry\n");
		mutex_lock(&cb->dma_mutex);
		myrb_hba_enquiry(cb);
		mutex_unlock(&cb->dma_mutex);
		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
		    cb->need_err_info || cb->need_rbld ||
		    cb->need_ldev_info || cb->need_cc_status ||
		    cb->need_bgi_status) {
			dev_dbg(&shost->shost_gendev,
				"reschedule monitor\n");
			interval = 0;
		}
	}
	/* Only a full-interval tick counts as a primary monitoring pass */
	if (interval > 1)
		cb->primary_monitor_time = jiffies;
	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}
2449
2450 /*
2451  * myrb_err_status - reports controller BIOS messages
2452  *
2453  * Controller BIOS messages are passed through the Error Status Register
2454  * when the driver performs the BIOS handshaking.
2455  *
2456  * Return: true for fatal errors and false otherwise.
2457  */
2458 static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2459                 unsigned char parm0, unsigned char parm1)
2460 {
2461         struct pci_dev *pdev = cb->pdev;
2462
2463         switch (error) {
2464         case 0x00:
2465                 dev_info(&pdev->dev,
2466                          "Physical Device %d:%d Not Responding\n",
2467                          parm1, parm0);
2468                 break;
2469         case 0x08:
2470                 dev_notice(&pdev->dev, "Spinning Up Drives\n");
2471                 break;
2472         case 0x30:
2473                 dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2474                 break;
2475         case 0x60:
2476                 dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2477                 break;
2478         case 0x70:
2479                 dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2480                 break;
2481         case 0x90:
2482                 dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2483                            parm1, parm0);
2484                 break;
2485         case 0xA0:
2486                 dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2487                 break;
2488         case 0xB0:
2489                 dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2490                 break;
2491         case 0xD0:
2492                 dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2493                 break;
2494         case 0xF0:
2495                 dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2496                 return true;
2497         default:
2498                 dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2499                         error);
2500                 return true;
2501         }
2502         return false;
2503 }
2504
2505 /*
2506  * Hardware-specific functions
2507  */
2508
2509 /*
2510  * DAC960 LA Series Controllers
2511  */
2512
/* Ring the inbound doorbell: new command posted in the hardware mailbox */
static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}
2517
/* Acknowledge (consume) the status in the hardware mailbox */
static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
}
2522
/* Issue a controller soft reset via the inbound doorbell */
static inline void DAC960_LA_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
}
2527
/* Notify the controller of a new command in the memory mailbox */
static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}
2532
/* LA reports mailbox occupancy via an EMPTY bit, hence the inversion */
static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
}
2539
/* True while firmware initialization has not signalled completion */
static inline bool DAC960_LA_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_INIT_DONE);
}
2546
/* Acknowledge only the hardware-mailbox interrupt */
static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}
2551
/* Acknowledge both hardware- and memory-mailbox interrupts */
static inline void DAC960_LA_ack_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_LA_ODB_OFFSET);
}
2557
/* True when the controller has posted a status in the hardware mailbox */
static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
}
2564
/* Enable interrupts: write the mask with the disable bit cleared */
static inline void DAC960_LA_enable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}
2572
/*
 * Disable interrupts: write the mask with the disable bit set.
 * The |= is redundant (odb already starts at 0xFF) but kept for
 * symmetry with DAC960_LA_enable_intr().
 */
static inline void DAC960_LA_disable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}
2580
/*
 * Copy a command into the memory mailbox.  Word 0 contains the opcode
 * and command id the controller polls on, so it must be written last;
 * the barriers enforce that ordering and flush the final write.
 */
static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}
2593
/* Write the 13 command bytes into the hardware mailbox registers */
static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
}
2602
/* Read the 16-bit command status register */
static inline unsigned short DAC960_LA_read_status(void __iomem *base)
{
	return readw(base + DAC960_LA_STS_OFFSET);
}
2607
/*
 * Read a pending BIOS error message, if any.  Returns false when no
 * error is pending; otherwise fills @error with the code (pending bit
 * stripped), @param0/@param1 with the parameter bytes, clears the
 * error status register and returns true.
 */
static inline bool
DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);

	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_LA_ERRSTS_PENDING;

	*error = errsts;
	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
	return true;
}
2624
/*
 * Execute one command synchronously through the hardware mailbox
 * during initialization (interrupts disabled): wait for the mailbox
 * to empty, post the command, poll for the status and acknowledge it.
 * Each poll loop waits up to MYRB_MAILBOX_TIMEOUT * 10us.
 *
 * Returns the controller status, or MYRB_STATUS_SUBSYS_TIMEOUT on
 * timeout.
 */
static inline unsigned short
DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_LA_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_LA_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_LA_write_hw_mbox(base, mbox);
	DAC960_LA_hw_mbox_new_cmd(base);
	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_LA_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_LA_read_status(base);
	/* Clear the interrupt and free the mailbox for the next command */
	DAC960_LA_ack_hw_mbox_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);

	return status;
}
2662
/*
 * DAC960_LA_hw_init - bring up an LA-series controller
 *
 * Waits for firmware initialization to finish (reporting any BIOS
 * error messages along the way), enables the memory mailbox interface
 * and wires up the controller method pointers in @cb.
 *
 * Returns 0 on success, -ENODEV on a fatal BIOS error or mailbox
 * setup failure, -ETIMEDOUT if initialization never completes.
 */
static int DAC960_LA_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_LA_disable_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_LA_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_LA_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_LA_enable_intr(base);
	/* Hook up the generic memory-mailbox queueing path */
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_LA_disable_intr;
	cb->reset = DAC960_LA_reset_ctrl;

	return 0;
}
2704
/*
 * DAC960_LA_intr_handler - LA-series interrupt handler
 *
 * Drains the ring of valid status mailboxes: resolves each completion
 * id to either an internal command block (MYRB_DCMD_TAG/MYRB_MCMD_TAG)
 * or a SCSI command (ids >= 3 map to host tag id - 3), records the
 * status, recycles the mailbox slot and dispatches the completion.
 */
static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_LA_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Return the slot to the ring before handling the command */
		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
2750
/* Per-board-type hooks for LA-series controllers */
static struct myrb_privdata DAC960_LA_privdata = {
	.hw_init =	DAC960_LA_hw_init,
	.irq_handler =	DAC960_LA_intr_handler,
	.mmio_size =	DAC960_LA_mmio_size,
};
2756
2757 /*
2758  * DAC960 PG Series Controllers
2759  */
/* Ring the inbound doorbell: new command posted in the hardware mailbox */
static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}
2764
/* Acknowledge (consume) the status in the hardware mailbox */
static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
}
2769
/* Issue a controller soft reset via the inbound doorbell */
static inline void DAC960_PG_reset_ctrl(void __iomem *base)
{
	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
}
2774
/* Notify the controller of a new command in the memory mailbox */
static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}
2779
/* PG reports occupancy directly via a FULL bit (no inversion, cf. LA) */
static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_HWMBOX_FULL;
}
2786
/* True while firmware initialization is still in progress */
static inline bool DAC960_PG_init_in_progress(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
}
2793
/* Acknowledge only the hardware-mailbox interrupt */
static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}
2798
/* Acknowledge both hardware- and memory-mailbox interrupts */
static inline void DAC960_PG_ack_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_PG_ODB_OFFSET);
}
2804
/* True when the controller has posted a status in the hardware mailbox */
static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
}
2811
/* Enable interrupts: write the mask with the disable bit cleared */
static inline void DAC960_PG_enable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}
2819
/* Disable interrupts: write the all-ones mask (disable bit included) */
static inline void DAC960_PG_disable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}
2826
/*
 * Copy a command into the memory mailbox.  Word 0 carries the opcode
 * and command id the controller polls on, so it is written last; the
 * barriers enforce that ordering and flush the final write.
 */
static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}
2839
/* Write the 13 command bytes into the hardware mailbox registers */
static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
}
2848
/* Read the 16-bit command status register */
static inline unsigned short
DAC960_PG_read_status(void __iomem *base)
{
	return readw(base + DAC960_PG_STS_OFFSET);
}
2854
/*
 * Read a pending BIOS error message, if any.  Returns false when no
 * error is pending; otherwise fills @error (pending bit stripped) and
 * the two parameter bytes, clears the error status register and
 * returns true.
 */
static inline bool
DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PG_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
	return true;
}
2870
/*
 * Execute one command synchronously through the hardware mailbox
 * during initialization (interrupts disabled): wait for the mailbox
 * to empty, post the command, poll for the status and acknowledge it.
 * Each poll loop waits up to MYRB_MAILBOX_TIMEOUT * 10us.
 *
 * Returns the controller status, or MYRB_STATUS_SUBSYS_TIMEOUT on
 * timeout.
 */
static inline unsigned short
DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_PG_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_PG_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_PG_write_hw_mbox(base, mbox);
	DAC960_PG_hw_mbox_new_cmd(base);

	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_PG_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_PG_read_status(base);
	/* Clear the interrupt and free the mailbox for the next command */
	DAC960_PG_ack_hw_mbox_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);

	return status;
}
2910
/*
 * DAC960_PG_hw_init - bring up a PG-series controller
 *
 * Waits for firmware initialization to finish (reporting any BIOS
 * error messages along the way), enables the memory mailbox interface
 * and wires up the controller method pointers in @cb.
 *
 * Returns 0 on success, -EIO on a fatal BIOS error, -ETIMEDOUT if
 * initialization never completes, -ENODEV on mailbox setup failure.
 */
static int DAC960_PG_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_PG_disable_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PG_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PG_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PG_enable_intr(base);
	/* Hook up the generic memory-mailbox queueing path */
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_PG_disable_intr;
	cb->reset = DAC960_PG_reset_ctrl;

	return 0;
}
2952
2953 static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
2954 {
2955         struct myrb_hba *cb = arg;
2956         void __iomem *base = cb->io_base;
2957         struct myrb_stat_mbox *next_stat_mbox;
2958         unsigned long flags;
2959
2960         spin_lock_irqsave(&cb->queue_lock, flags);
2961         DAC960_PG_ack_intr(base);
2962         next_stat_mbox = cb->next_stat_mbox;
2963         while (next_stat_mbox->valid) {
2964                 unsigned char id = next_stat_mbox->id;
2965                 struct scsi_cmnd *scmd = NULL;
2966                 struct myrb_cmdblk *cmd_blk = NULL;
2967
2968                 if (id == MYRB_DCMD_TAG)
2969                         cmd_blk = &cb->dcmd_blk;
2970                 else if (id == MYRB_MCMD_TAG)
2971                         cmd_blk = &cb->mcmd_blk;
2972                 else {
2973                         scmd = scsi_host_find_tag(cb->host, id - 3);
2974                         if (scmd)
2975                                 cmd_blk = scsi_cmd_priv(scmd);
2976                 }
2977                 if (cmd_blk)
2978                         cmd_blk->status = next_stat_mbox->status;
2979                 else
2980                         dev_err(&cb->pdev->dev,
2981                                 "Unhandled command completion %d\n", id);
2982
2983                 memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2984                 if (++next_stat_mbox > cb->last_stat_mbox)
2985                         next_stat_mbox = cb->first_stat_mbox;
2986
2987                 if (id < 3)
2988                         myrb_handle_cmdblk(cb, cmd_blk);
2989                 else
2990                         myrb_handle_scsi(cb, cmd_blk, scmd);
2991         }
2992         cb->next_stat_mbox = next_stat_mbox;
2993         spin_unlock_irqrestore(&cb->queue_lock, flags);
2994         return IRQ_HANDLED;
2995 }
2996
/* Per-board-type hooks for PG-series controllers */
static struct myrb_privdata DAC960_PG_privdata = {
	.hw_init =	DAC960_PG_hw_init,
	.irq_handler =	DAC960_PG_intr_handler,
	.mmio_size =	DAC960_PG_mmio_size,
};
3002
3003
3004 /*
3005  * DAC960 PD Series Controllers
3006  */
3007
/* Ring the inbound doorbell: new command posted in the hardware mailbox */
static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
}
3012
/* Acknowledge (consume) the status in the hardware mailbox */
static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
}
3017
/* Issue a controller soft reset via the inbound doorbell */
static inline void DAC960_PD_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
}
3022
/* True while the hardware mailbox still holds an unconsumed command */
static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_HWMBOX_FULL;
}
3029
/* True while firmware initialization is still in progress */
static inline bool DAC960_PD_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
}
3036
/* Acknowledge the mailbox interrupt (PD has no memory mailbox IRQ) */
static inline void DAC960_PD_ack_intr(void __iomem *base)
{
	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
}
3041
/* True when the controller has posted a status in the hardware mailbox */
static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);

	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
}
3048
/* Enable interrupts via the dedicated interrupt-enable register */
static inline void DAC960_PD_enable_intr(void __iomem *base)
{
	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
}
3053
/* Disable interrupts by clearing the interrupt-enable register */
static inline void DAC960_PD_disable_intr(void __iomem *base)
{
	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
}
3058
/* Write the 13 command bytes into the hardware mailbox registers */
static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
}
3067
/* Read the command id associated with the posted status */
static inline unsigned char
DAC960_PD_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PD_STSID_OFFSET);
}
3073
/* Read the 16-bit command status register */
static inline unsigned short
DAC960_PD_read_status(void __iomem *base)
{
	return readw(base + DAC960_PD_STS_OFFSET);
}
3079
/*
 * Read a pending BIOS error message, if any.  Returns false when no
 * error is pending; otherwise fills @error (pending bit stripped) and
 * the two parameter bytes, clears the error status register and
 * returns true.
 */
static inline bool
DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PD_ERRSTS_PENDING;
	*error = errsts;
	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
	return true;
}
3095
/*
 * DAC960_PD_qcmd - queue a command on a PD-series controller
 *
 * PD controllers have no memory mailbox: busy-wait (1us steps) until
 * the hardware mailbox drains, then post the command directly.
 */
static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
3106
/*
 * DAC960_PD_hw_init - bring up a PD-series controller
 *
 * Claims the controller's I/O port range, waits for firmware
 * initialization to finish (reporting any BIOS error messages along
 * the way) and wires up the direct-mailbox method pointers in @cb.
 *
 * Returns 0 on success, -EBUSY if the I/O region is taken, -EIO on a
 * fatal BIOS error, -ETIMEDOUT if initialization never completes,
 * -ENODEV on mailbox setup failure.
 *
 * NOTE(review): the error returns after request_region() do not
 * release the region here — presumably the caller's cleanup path
 * (myrb_cleanup()) releases cb->io_addr; verify.
 */
static int DAC960_PD_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	/* PD queues straight to the hardware mailbox */
	cb->qcmd = DAC960_PD_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
3148
3149 static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
3150 {
3151         struct myrb_hba *cb = arg;
3152         void __iomem *base = cb->io_base;
3153         unsigned long flags;
3154
3155         spin_lock_irqsave(&cb->queue_lock, flags);
3156         while (DAC960_PD_hw_mbox_status_available(base)) {
3157                 unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3158                 struct scsi_cmnd *scmd = NULL;
3159                 struct myrb_cmdblk *cmd_blk = NULL;
3160
3161                 if (id == MYRB_DCMD_TAG)
3162                         cmd_blk = &cb->dcmd_blk;
3163                 else if (id == MYRB_MCMD_TAG)
3164                         cmd_blk = &cb->mcmd_blk;
3165                 else {
3166                         scmd = scsi_host_find_tag(cb->host, id - 3);
3167                         if (scmd)
3168                                 cmd_blk = scsi_cmd_priv(scmd);
3169                 }
3170                 if (cmd_blk)
3171                         cmd_blk->status = DAC960_PD_read_status(base);
3172                 else
3173                         dev_err(&cb->pdev->dev,
3174                                 "Unhandled command completion %d\n", id);
3175
3176                 DAC960_PD_ack_intr(base);
3177                 DAC960_PD_ack_hw_mbox_status(base);
3178
3179                 if (id < 3)
3180                         myrb_handle_cmdblk(cb, cmd_blk);
3181                 else
3182                         myrb_handle_scsi(cb, cmd_blk, scmd);
3183         }
3184         spin_unlock_irqrestore(&cb->queue_lock, flags);
3185         return IRQ_HANDLED;
3186 }
3187
/* Model-specific hooks for DAC960 PD-series controllers. */
static struct myrb_privdata DAC960_PD_privdata = {
	.hw_init =	DAC960_PD_hw_init,
	.irq_handler =	DAC960_PD_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
3193
3194
3195 /*
3196  * DAC960 P Series Controllers
3197  *
3198  * Similar to the DAC960 PD Series Controllers, but some commands have
3199  * to be translated.
3200  */
3201
/*
 * Repack old-style (P-series) enquiry data into the new layout, in
 * place: the 64 bytes at offset 36 move to offset 132, and the vacated
 * 96-byte region starting at offset 36 is zeroed.
 */
static inline void myrb_translate_enquiry(void *enq)
{
	unsigned char *buf = enq;

	memcpy(buf + 132, buf + 36, 64);
	memset(buf + 36, 0, 96);
}
3207
/*
 * Compact an old-style (P-series) device-state buffer into the new
 * layout, in place.  Each field is shifted down over the gaps the old
 * format contained: byte 3 -> 2, bytes 5..6 -> 4..5, bytes 8..11 -> 6..9.
 */
static inline void myrb_translate_devstate(void *state)
{
	unsigned char *buf = state;

	buf[2] = buf[3];
	memmove(buf + 4, buf + 5, 2);
	memmove(buf + 6, buf + 8, 4);
}
3214
3215 static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3216 {
3217         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3218         int ldev_num = mbox->type5.ld.ldev_num;
3219
3220         mbox->bytes[3] &= 0x7;
3221         mbox->bytes[3] |= mbox->bytes[7] << 6;
3222         mbox->bytes[7] = ldev_num;
3223 }
3224
3225 static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3226 {
3227         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3228         int ldev_num = mbox->bytes[7];
3229
3230         mbox->bytes[7] = mbox->bytes[3] >> 6;
3231         mbox->bytes[3] &= 0x7;
3232         mbox->bytes[3] |= ldev_num << 3;
3233 }
3234
3235 static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3236 {
3237         void __iomem *base = cb->io_base;
3238         union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3239
3240         switch (mbox->common.opcode) {
3241         case MYRB_CMD_ENQUIRY:
3242                 mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
3243                 break;
3244         case MYRB_CMD_GET_DEVICE_STATE:
3245                 mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
3246                 break;
3247         case MYRB_CMD_READ:
3248                 mbox->common.opcode = MYRB_CMD_READ_OLD;
3249                 myrb_translate_to_rw_command(cmd_blk);
3250                 break;
3251         case MYRB_CMD_WRITE:
3252                 mbox->common.opcode = MYRB_CMD_WRITE_OLD;
3253                 myrb_translate_to_rw_command(cmd_blk);
3254                 break;
3255         case MYRB_CMD_READ_SG:
3256                 mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
3257                 myrb_translate_to_rw_command(cmd_blk);
3258                 break;
3259         case MYRB_CMD_WRITE_SG:
3260                 mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
3261                 myrb_translate_to_rw_command(cmd_blk);
3262                 break;
3263         default:
3264                 break;
3265         }
3266         while (DAC960_PD_hw_mbox_is_full(base))
3267                 udelay(1);
3268         DAC960_PD_write_cmd_mbox(base, mbox);
3269         DAC960_PD_hw_mbox_new_cmd(base);
3270 }
3271
3272
3273 static int DAC960_P_hw_init(struct pci_dev *pdev,
3274                 struct myrb_hba *cb, void __iomem *base)
3275 {
3276         int timeout = 0;
3277         unsigned char error, parm0, parm1;
3278
3279         if (!request_region(cb->io_addr, 0x80, "myrb")) {
3280                 dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3281                         (unsigned long)cb->io_addr);
3282                 return -EBUSY;
3283         }
3284         DAC960_PD_disable_intr(base);
3285         DAC960_PD_ack_hw_mbox_status(base);
3286         udelay(1000);
3287         while (DAC960_PD_init_in_progress(base) &&
3288                timeout < MYRB_MAILBOX_TIMEOUT) {
3289                 if (DAC960_PD_read_error_status(base, &error,
3290                                                 &parm0, &parm1) &&
3291                     myrb_err_status(cb, error, parm0, parm1))
3292                         return -EAGAIN;
3293                 udelay(10);
3294                 timeout++;
3295         }
3296         if (timeout == MYRB_MAILBOX_TIMEOUT) {
3297                 dev_err(&pdev->dev,
3298                         "Timeout waiting for Controller Initialisation\n");
3299                 return -ETIMEDOUT;
3300         }
3301         if (!myrb_enable_mmio(cb, NULL)) {
3302                 dev_err(&pdev->dev,
3303                         "Unable to allocate DMA mapped memory\n");
3304                 DAC960_PD_reset_ctrl(base);
3305                 return -ETIMEDOUT;
3306         }
3307         DAC960_PD_enable_intr(base);
3308         cb->qcmd = DAC960_P_qcmd;
3309         cb->disable_intr = DAC960_PD_disable_intr;
3310         cb->reset = DAC960_PD_reset_ctrl;
3311
3312         return 0;
3313 }
3314
/*
 * DAC960_P_intr_handler - interrupt handler for P-series controllers.
 *
 * Like the PD-series handler, but commands that were submitted with
 * translated (old-style) opcodes are translated back to the new-style
 * opcode and mailbox layout before completion processing, so the rest
 * of the driver only ever sees new-style formats.  Tags MYRB_DCMD_TAG
 * and MYRB_MCMD_TAG identify the driver's internal command blocks; any
 * other tag maps back to its SCSI command (host tags are offset by 3
 * on submission).
 */
static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;
		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;


		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		/* Latch the status before acknowledging the mailbox. */
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		/* Nothing to complete for an unmatched tag. */
		if (!cmd_blk)
			continue;

		/* Undo the opcode/layout translation done at submission. */
		mbox = &cmd_blk->mbox;
		op = mbox->common.opcode;
		switch (op) {
		case MYRB_CMD_ENQUIRY_OLD:
			mbox->common.opcode = MYRB_CMD_ENQUIRY;
			/* Enquiry data arrived in the old layout; repack it. */
			myrb_translate_enquiry(cb->enquiry);
			break;
		case MYRB_CMD_READ_OLD:
			mbox->common.opcode = MYRB_CMD_READ;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_READ_SG_OLD:
			mbox->common.opcode = MYRB_CMD_READ_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_SG_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		default:
			break;
		}
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
3385
/* Model-specific hooks for DAC960 P-series controllers (shares the
 * PD-series register window size). */
static struct myrb_privdata DAC960_P_privdata = {
	.hw_init =	DAC960_P_hw_init,
	.irq_handler =	DAC960_P_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
3391
/*
 * myrb_detect - allocate and initialize a controller instance.
 *
 * Allocates a Scsi_Host with embedded myrb_hba private data, enables the
 * PCI device, maps the controller register window, runs the model-specific
 * hw_init hook and installs the model's interrupt handler.
 *
 * Returns the initialized myrb_hba, or NULL on failure.  Once the PCI
 * device is enabled, all failure paths funnel through myrb_cleanup(),
 * which inspects the partially-initialized myrb_hba to decide what to
 * tear down.
 */
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;
	cb->host = shost;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		scsi_host_put(shost);
		return NULL;
	}

	/* PD and P series use an I/O port region (BAR 0) in addition to
	 * the memory-mapped register window (BAR 1); other models only
	 * have the memory window in BAR 0. */
	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else
		cb->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	/* Map a page-aligned window of at least one page; io_base then
	 * points at the register block's offset within that mapping. */
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrb_cleanup(cb);
	return NULL;
}
3457
3458 static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3459 {
3460         struct myrb_hba *cb;
3461         int ret;
3462
3463         cb = myrb_detect(dev, entry);
3464         if (!cb)
3465                 return -ENODEV;
3466
3467         ret = myrb_get_hba_config(cb);
3468         if (ret < 0) {
3469                 myrb_cleanup(cb);
3470                 return ret;
3471         }
3472
3473         if (!myrb_create_mempools(dev, cb)) {
3474                 ret = -ENOMEM;
3475                 goto failed;
3476         }
3477
3478         ret = scsi_add_host(cb->host, &dev->dev);
3479         if (ret) {
3480                 dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3481                 myrb_destroy_mempools(cb);
3482                 goto failed;
3483         }
3484         scsi_scan_host(cb->host);
3485         return 0;
3486 failed:
3487         myrb_cleanup(cb);
3488         return ret;
3489 }
3490
3491
/*
 * myrb_remove - PCI removal callback.
 *
 * Flushes the controller's write cache before tearing down the
 * controller state and releasing the DMA mempools.
 */
static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}
3501
3502
/*
 * PCI IDs of supported controllers.  The DAC960 LA is matched on a DEC
 * 21285 device with Mylex subsystem IDs; the remaining models use plain
 * Mylex device IDs.  driver_data points at the model-specific
 * myrb_privdata descriptor.
 */
static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data	= (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrb_id_table);
3524
/* PCI driver glue: binds the ID table to the probe/remove callbacks. */
static struct pci_driver myrb_pci_driver = {
	.name		= "myrb",
	.id_table	= myrb_id_table,
	.probe		= myrb_probe,
	.remove		= myrb_remove,
};
3531
3532 static int __init myrb_init_module(void)
3533 {
3534         int ret;
3535
3536         myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3537         if (!myrb_raid_template)
3538                 return -ENODEV;
3539
3540         ret = pci_register_driver(&myrb_pci_driver);
3541         if (ret)
3542                 raid_class_release(myrb_raid_template);
3543
3544         return ret;
3545 }
3546
/*
 * Module unload: unregister the PCI driver first (no new probes, all
 * instances removed), then drop the RAID class template taken at init.
 */
static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}
3552
/* Module entry/exit points and metadata. */
module_init(myrb_init_module);
module_exit(myrb_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");