GNU Linux-libre 4.9.317-gnu1
[releases.git] / drivers / staging / unisys / visorhba / visorhba_main.c
1 /* Copyright (c) 2012 - 2015 UNISYS CORPORATION
2  * All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or (at
7  * your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12  * NON INFRINGEMENT.  See the GNU General Public License for more
13  * details.
14  */
15
16 #include <linux/debugfs.h>
17 #include <linux/skbuff.h>
18 #include <linux/kthread.h>
19 #include <linux/idr.h>
20 #include <linux/seq_file.h>
21 #include <scsi/scsi.h>
22 #include <scsi/scsi_host.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_device.h>
25
26 #include "visorbus.h"
27 #include "iochannel.h"
28
29 /* The Send and Receive Buffers of the IO Queue may both be full */
30
31 #define IOS_ERROR_THRESHOLD     1000
32 /* MAX_BUF = 6 lines x 10 MAXVHBA x 80 characters
33  *         = 4800 bytes ~ 2^13 = 8192 bytes
34  */
35 #define MAX_BUF                 8192
36 #define MAX_PENDING_REQUESTS    (MIN_NUMSIGNALS * 2)
37 #define VISORHBA_ERROR_COUNT    30
38
39 static struct dentry *visorhba_debugfs_dir;
40
41 /* GUIDS for HBA channel type supported by this driver */
42 static struct visor_channeltype_descriptor visorhba_channel_types[] = {
43         /* Note that the only channel type we expect to be reported by the
44          * bus driver is the SPAR_VHBA channel.
45          */
46         { SPAR_VHBA_CHANNEL_PROTOCOL_UUID, "sparvhba" },
47         { NULL_UUID_LE, NULL }
48 };
49
50 MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
51 MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);
52
/* Per-disk bookkeeping. Nodes live in a singly-linked list hanging off
 * visorhba_devdata.head (a zeroed, embedded dummy head node).
 */
struct visordisk_info {
	u32 valid;		/* NOTE(review): not referenced in this file chunk — confirm use */
	u32 channel, id, lun;	/* Disk Path */
	atomic_t ios_threshold;	/* set to IOS_ERROR_THRESHOLD once error_count saturates */
	atomic_t error_count;	/* error tally, capped at VISORHBA_ERROR_COUNT */
	struct visordisk_info *next;	/* next disk; NULL terminates the list */
};
60
/* One slot in the fixed-size array of requests outstanding to the IOVM.
 * A slot is in use while "sent" is non-NULL; the embedded cmdrsp is used
 * directly when no external command object is being tracked.
 */
struct scsipending {
	struct uiscmdrsp cmdrsp;
	void *sent;		/* The Data being tracked */
	char cmdtype;		/* Type of pointer that is being stored */
};
66
/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
	struct Scsi_Host *scsihost;	/* SCSI midlayer host we registered */
	struct visor_device *dev;	/* underlying visorbus device/channel */
	struct list_head dev_info_list;
	/* Tracks the requests that have been forwarded to
	 * the IOVM and haven't returned yet
	 */
	struct scsipending pending[MAX_PENDING_REQUESTS];
	/* Start search for next pending free slot here */
	unsigned int nextinsert;
	spinlock_t privlock; /* lock to protect data in devdata */
	bool serverdown;		/* IOVM is down; fail new requests */
	bool serverchangingstate;	/* serverdown teardown in progress */
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	u64 __iomem *flags_addr;	/* channel feature-flags word (debugfs dumps it) */
	atomic_t interrupt_rcvd;
	wait_queue_head_t rsp_queue;
	struct visordisk_info head;	/* dummy head of the vdisk list (zeroed) */
	unsigned int max_buff_len;	/* largest scsi_bufflen() seen so far */
	int devnum;
	struct task_struct *thread;	/* response-processing thread */
	int thread_wait_ms;

	/*
	 * allows us to pass int handles back-and-forth between us and
	 * iovm, instead of raw pointers
	 */
	struct idr idr;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_info;
};
103
/* NOTE(review): simple wrapper around a devdata pointer; not referenced
 * anywhere in the visible portion of this file — confirm it is still needed.
 */
struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};
107
/* Walk the vdisk list of @list (a visorhba_devdata), executing the body
 * for each node whose channel/id/lun match @match (a scsi_device).
 *
 * Semantics to be aware of (the loop condition is iter->next, and the
 * match tests iter's OWN fields):
 *  - the embedded dummy head node IS examined; its fields are zeroed, so
 *    NOTE(review): a device at 0:0:0 would appear to match the dummy —
 *    confirm this is intended
 *  - the LAST node in the list is never examined by the body
 *  - after the loop falls through, iter points at the tail node
 *    (visorhba_slave_alloc relies on this to append)
 */
#define for_each_vdisk_match(iter, list, match)                   \
	for (iter = &list->head; iter->next; iter = iter->next) \
		if ((iter->channel == match->channel) &&                  \
		    (iter->id == match->id) &&                    \
		    (iter->lun == match->lun))
113 /**
114  *      visor_thread_start - starts a thread for the device
115  *      @threadfn: Function the thread starts
116  *      @thrcontext: Context to pass to the thread, i.e. devdata
117  *      @name: string describing name of thread
118  *
119  *      Starts a thread for the device.
120  *
121  *      Return the task_struct * denoting the thread on success,
122  *             or NULL on failure
123  */
124 static struct task_struct *visor_thread_start
125 (int (*threadfn)(void *), void *thrcontext, char *name)
126 {
127         struct task_struct *task;
128
129         task = kthread_run(threadfn, thrcontext, "%s", name);
130         if (IS_ERR(task)) {
131                 pr_err("visorbus failed to start thread\n");
132                 return NULL;
133         }
134         return task;
135 }
136
/**
 *	visor_thread_stop - stops the thread if it is running
 *	@task: thread returned by visor_thread_start(), or NULL (no-op)
 */
static void visor_thread_stop(struct task_struct *task)
{
	if (task)
		kthread_stop(task);
}
146
147 /**
148  *      add_scsipending_entry - save off io command that is pending in
149  *                              Service Partition
150  *      @devdata: Pointer to devdata
151  *      @cmdtype: Specifies the type of command pending
152  *      @new:   The command to be saved
153  *
154  *      Saves off the io command that is being handled by the Service
155  *      Partition so that it can be handled when it completes. If new is
156  *      NULL it is assumed the entry refers only to the cmdrsp.
157  *      Returns insert_location where entry was added,
158  *      -EBUSY if it can't
159  */
160 static int add_scsipending_entry(struct visorhba_devdata *devdata,
161                                  char cmdtype, void *new)
162 {
163         unsigned long flags;
164         struct scsipending *entry;
165         int insert_location;
166
167         spin_lock_irqsave(&devdata->privlock, flags);
168         insert_location = devdata->nextinsert;
169         while (devdata->pending[insert_location].sent) {
170                 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
171                 if (insert_location == (int)devdata->nextinsert) {
172                         spin_unlock_irqrestore(&devdata->privlock, flags);
173                         return -EBUSY;
174                 }
175         }
176
177         entry = &devdata->pending[insert_location];
178         memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
179         entry->cmdtype = cmdtype;
180         if (new)
181                 entry->sent = new;
182         else /* wants to send cmdrsp */
183                 entry->sent = &entry->cmdrsp;
184         devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
185         spin_unlock_irqrestore(&devdata->privlock, flags);
186
187         return insert_location;
188 }
189
190 /**
191  *      del_scsipending_enty - removes an entry from the pending array
192  *      @devdata: Device holding the pending array
193  *      @del: Entry to remove
194  *
195  *      Removes the entry pointed at by del and returns it.
196  *      Returns the scsipending entry pointed at
197  */
198 static void *del_scsipending_ent(struct visorhba_devdata *devdata,
199                                  int del)
200 {
201         unsigned long flags;
202         void *sent;
203
204         if (del >= MAX_PENDING_REQUESTS)
205                 return NULL;
206
207         spin_lock_irqsave(&devdata->privlock, flags);
208         sent = devdata->pending[del].sent;
209
210         devdata->pending[del].cmdtype = 0;
211         devdata->pending[del].sent = NULL;
212         spin_unlock_irqrestore(&devdata->privlock, flags);
213
214         return sent;
215 }
216
217 /**
218  *      get_scsipending_cmdrsp - return the cmdrsp stored in a pending entry
219  *      #ddata: Device holding the pending array
220  *      @ent: Entry that stores the cmdrsp
221  *
222  *      Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
223  *      if the "sent" field is not NULL
224  *      Returns a pointer to the cmdrsp.
225  */
226 static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
227                                                 int ent)
228 {
229         if (ddata->pending[ent].sent)
230                 return &ddata->pending[ent].cmdrsp;
231
232         return NULL;
233 }
234
235 /**
236  *      simple_idr_get - associate a provided pointer with an int value
237  *                       1 <= value <= INT_MAX, and return this int value;
238  *                       the pointer value can be obtained later by passing
239  *                       this int value to idr_find()
240  *      @idrtable: the data object maintaining the pointer<-->int mappings
241  *      @p: the pointer value to be remembered
242  *      @lock: a spinlock used when exclusive access to idrtable is needed
243  */
244 static unsigned int simple_idr_get(struct idr *idrtable, void *p,
245                                    spinlock_t *lock)
246 {
247         int id;
248         unsigned long flags;
249
250         idr_preload(GFP_KERNEL);
251         spin_lock_irqsave(lock, flags);
252         id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
253         spin_unlock_irqrestore(lock, flags);
254         idr_preload_end();
255         if (id < 0)
256                 return 0;  /* failure */
257         return (unsigned int)(id);  /* idr_alloc() guarantees > 0 */
258 }
259
260 /**
261  *      setup_scsitaskmgmt_handles - stash the necessary handles so that the
262  *                                   completion processing logic for a taskmgmt
263  *                                   cmd will be able to find who to wake up
264  *                                   and where to stash the result
265  */
266 static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
267                                        struct uiscmdrsp *cmdrsp,
268                                        wait_queue_head_t *event, int *result)
269 {
270         /* specify the event that has to be triggered when this */
271         /* cmd is complete */
272         cmdrsp->scsitaskmgmt.notify_handle =
273                 simple_idr_get(idrtable, event, lock);
274         cmdrsp->scsitaskmgmt.notifyresult_handle =
275                 simple_idr_get(idrtable, result, lock);
276 }
277
278 /**
279  *      cleanup_scsitaskmgmt_handles - forget handles created by
280  *                                     setup_scsitaskmgmt_handles()
281  */
282 static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
283                                          struct uiscmdrsp *cmdrsp)
284 {
285         if (cmdrsp->scsitaskmgmt.notify_handle)
286                 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
287         if (cmdrsp->scsitaskmgmt.notifyresult_handle)
288                 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
289 }
290
/**
 *	forward_taskmgmt_command - send taskmgmt command to the Service
 *				   Partition
 *	@tasktype: Type of taskmgmt command
 *	@scsicmd: The scsi command that triggered the taskmgmt request
 *
 *	Create a cmdrsp packet and send it to the Service Partition
 *	that will service this request, then wait (up to 45s) for the
 *	completion to be signalled.
 *	Returns SUCCESS if the command completed, FAILED otherwise.
 */
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
				    struct scsi_cmnd *scsicmd)
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsidev->host->hostdata;
	int notifyresult = 0xffff;	/* sentinel: overwritten on completion */
	wait_queue_head_t notifyevent;	/* on-stack; published via idr handle */
	int scsicmd_id = 0;

	if (devdata->serverdown || devdata->serverchangingstate)
		return FAILED;

	/* reserve a pending slot; its embedded cmdrsp carries the request */
	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
					   NULL);
	if (scsicmd_id < 0)
		return FAILED;

	cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);

	init_waitqueue_head(&notifyevent);

	/* issue TASK_MGMT_ABORT_TASK */
	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
	/* publish the on-stack waitqueue/result via int handles so the
	 * completion path can find them with idr_find()
	 */
	setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
				   &notifyevent, &notifyresult);

	/* save destination */
	cmdrsp->scsitaskmgmt.tasktype = tasktype;
	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
	cmdrsp->scsitaskmgmt.handle = scsicmd_id;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: initiating type=%d taskmgmt command\n", tasktype);
	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		goto err_del_scsipending_ent;

	/* It can take the Service Partition up to 35 seconds to complete
	 * an IO in some cases, so wait 45 seconds and error out.
	 * NOTE(review): on timeout the idr handles are removed below, so a
	 * late completion falls into the "no completion context" path rather
	 * than touching this (by-then dead) stack frame — but confirm there
	 * is no window between idr_find() and wake_up_all() in the
	 * completion path racing this return.
	 */
	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
				msecs_to_jiffies(45000)))
		goto err_del_scsipending_ent;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d success; result=0x%x\n",
		 tasktype, notifyresult);
	if (tasktype == TASK_MGMT_ABORT_TASK)
		scsicmd->result = DID_ABORT << 16;
	else
		scsicmd->result = DID_RESET << 16;

	scsicmd->scsi_done(scsicmd);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return SUCCESS;

err_del_scsipending_ent:
	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d not executed\n", tasktype);
	del_scsipending_ent(devdata, scsicmd_id);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return FAILED;
}
369
370 /**
371  *      visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
372  *      @scsicmd: The scsicmd that needs aborted
373  *
374  *      Returns SUCCESS if inserted, failure otherwise
375  *
376  */
377 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
378 {
379         /* issue TASK_MGMT_ABORT_TASK */
380         struct scsi_device *scsidev;
381         struct visordisk_info *vdisk;
382         struct visorhba_devdata *devdata;
383
384         scsidev = scsicmd->device;
385         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
386         for_each_vdisk_match(vdisk, devdata, scsidev) {
387                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
388                         atomic_inc(&vdisk->error_count);
389                 else
390                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
391         }
392         return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd);
393 }
394
395 /**
396  *      visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
397  *      @scsicmd: The scsicmd that needs aborted
398  *
399  *      Returns SUCCESS if inserted, failure otherwise
400  */
401 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
402 {
403         /* issue TASK_MGMT_LUN_RESET */
404         struct scsi_device *scsidev;
405         struct visordisk_info *vdisk;
406         struct visorhba_devdata *devdata;
407
408         scsidev = scsicmd->device;
409         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
410         for_each_vdisk_match(vdisk, devdata, scsidev) {
411                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
412                         atomic_inc(&vdisk->error_count);
413                 else
414                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
415         }
416         return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd);
417 }
418
419 /**
420  *      visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
421  *                                   target on the bus
422  *      @scsicmd: The scsicmd that needs aborted
423  *
424  *      Returns SUCCESS
425  */
426 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
427 {
428         struct scsi_device *scsidev;
429         struct visordisk_info *vdisk;
430         struct visorhba_devdata *devdata;
431
432         scsidev = scsicmd->device;
433         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
434         for_each_vdisk_match(vdisk, devdata, scsidev) {
435                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
436                         atomic_inc(&vdisk->error_count);
437                 else
438                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
439         }
440         return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd);
441 }
442
443 /**
444  *      visorhba_host_reset_handler - Not supported
445  *      @scsicmd: The scsicmd that needs aborted
446  *
447  *      Not supported, return SUCCESS
448  *      Returns SUCCESS
449  */
450 static int
451 visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
452 {
453         /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
454         return SUCCESS;
455 }
456
/**
 *	visorhba_get_info - .info callback for the scsi_host_template
 *	@shp: Scsi host that is requesting information
 *
 *	Returns a static driver-name string.
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Return version string */
	return "visorhba";
}
468
/**
 *	visorhba_queue_command_lck -- queues command to the Service Partition
 *	@scsicmd: Command to be queued
 *	@visorhba_cmnd_done: Done function to call when scsicmd is returned
 *
 *	Queues the scsicmd to the Service Partition after converting it to a
 *	uiscmdrsp structure.
 *
 *	Returns 0 if queued to the Service Partition, otherwise
 *	SCSI_MLQUEUE_DEVICE_BUSY so the midlayer retries later.
 */
static int
visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
			   void (*visorhba_cmnd_done)(struct scsi_cmnd *))
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	int insert_location;
	unsigned char *cdb = scsicmd->cmnd;
	struct Scsi_Host *scsihost = scsidev->host;
	unsigned int i;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsihost->hostdata;
	struct scatterlist *sg = NULL;
	struct scatterlist *sglist = NULL;

	/* refuse new work while the IOVM is down or transitioning */
	if (devdata->serverdown || devdata->serverchangingstate)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	/* reserve a pending slot; the scsicmd pointer will be returned by
	 * del_scsipending_ent() when the response arrives
	 */
	insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
						(void *)scsicmd);

	if (insert_location < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);

	cmdrsp->cmdtype = CMD_SCSI_TYPE;
	/* save the pending insertion location. Deletion from pending
	 * will return the scsicmd pointer for completion
	 */
	cmdrsp->scsi.handle = insert_location;

	/* save done function that we have call when cmd is complete;
	 * must be set before the signal is inserted, since the response
	 * thread may complete the command as soon as it is queued
	 */
	scsicmd->scsi_done = visorhba_cmnd_done;
	/* save destination */
	cmdrsp->scsi.vdest.channel = scsidev->channel;
	cmdrsp->scsi.vdest.id = scsidev->id;
	cmdrsp->scsi.vdest.lun = scsidev->lun;
	/* save datadir */
	cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
	/* copy the full fixed-size CDB area (unused tail bytes included) */
	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);

	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

	/* keep track of the max buffer length so far. */
	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
		devdata->max_buff_len = cmdrsp->scsi.bufflen;

	/* the channel's gpi_list is fixed-size; refuse oversized s/g lists */
	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
		goto err_del_scsipending_ent;

	/* convert buffer to phys information  */
	/* buffer is scatterlist - copy it out */
	sglist = scsi_sglist(scsicmd);

	for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
		cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
		cmdrsp->scsi.gpi_list[i].length = sg->length;
	}
	cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		/* queue must be full and we aren't going to wait */
		goto err_del_scsipending_ent;

	return 0;

err_del_scsipending_ent:
	/* release the reserved slot so it can be reused */
	del_scsipending_ent(devdata, insert_location);
	return SCSI_MLQUEUE_DEVICE_BUSY;
}
553
/* Kernels providing DEF_SCSI_QCMD generate the host-lock wrapper around
 * visorhba_queue_command_lck; otherwise use the _lck variant directly.
 */
#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(visorhba_queue_command)
#else
#define visorhba_queue_command visorhba_queue_command_lck
#endif
559
/**
 *	visorhba_slave_alloc - called when new disk is discovered
 *	@scsidev: New disk
 *
 *	Create a new visordisk_info structure and add it to our
 *	list of vdisks.
 *
 *	Returns success when created, otherwise error.
 */
static int visorhba_slave_alloc(struct scsi_device *scsidev)
{
	/* this is called by the midlayer before scan for new devices --
	 * LLD can alloc any struct & do init if needed.
	 */
	struct visordisk_info *vdisk;
	struct visordisk_info *tmpvdisk;
	struct visorhba_devdata *devdata;
	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;

	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	if (!devdata)
		return 0; /* even though we errored, treat as success */

	/* duplicate check; NOTE(review): per the macro's semantics the tail
	 * node is never examined, so a duplicate of the most recently added
	 * disk would not be detected — confirm this is acceptable
	 */
	for_each_vdisk_match(vdisk, devdata, scsidev)
		return 0; /* already allocated return success */

	tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
	if (!tmpvdisk)
		return -ENOMEM;

	tmpvdisk->channel = scsidev->channel;
	tmpvdisk->id = scsidev->id;
	tmpvdisk->lun = scsidev->lun;
	/* when the macro loop falls through, vdisk is the list tail, so this
	 * appends the new node (relies on that post-loop guarantee)
	 */
	vdisk->next = tmpvdisk;
	return 0;
}
596
/**
 *	visorhba_slave_destroy - disk is going away
 *	@scsidev: scsi device going away
 *
 *	Disk is going away, clean up resources.
 *	Returns void.
 */
static void visorhba_slave_destroy(struct scsi_device *scsidev)
{
	/* midlevel calls this after device has been quiesced and
	 * before it is to be deleted.
	 */
	struct visordisk_info *vdisk, *delvdisk;
	struct visorhba_devdata *devdata;
	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;

	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	for_each_vdisk_match(vdisk, devdata, scsidev) {
		/* NOTE(review): vdisk is the node whose OWN fields matched
		 * @scsidev, yet the node unlinked and freed here is
		 * vdisk->next (the macro's loop condition guarantees it is
		 * non-NULL). That removes the node AFTER the match — confirm
		 * whether the macro was meant to match on iter->next instead.
		 */
		delvdisk = vdisk->next;
		vdisk->next = delvdisk->next;
		kfree(delvdisk);
		return;
	}
}
621
/* SCSI midlayer host template: wires the queuecommand and error-handler
 * entry points above into the scsi_host registered per visor device.
 */
static struct scsi_host_template visorhba_driver_template = {
	.name = "Unisys Visor HBA",
	.info = visorhba_get_info,
	.queuecommand = visorhba_queue_command,
	.eh_abort_handler = visorhba_abort_handler,
	.eh_device_reset_handler = visorhba_device_reset_handler,
	.eh_bus_reset_handler = visorhba_bus_reset_handler,
	.eh_host_reset_handler = visorhba_host_reset_handler,
	.shost_attrs = NULL,
/* maximum commands the midlayer may have outstanding on this host */
#define visorhba_MAX_CMNDS 128
	.can_queue = visorhba_MAX_CMNDS,
	.sg_tablesize = 64,	/* must not exceed MAX_PHYS_INFO (checked in queuecommand) */
	.this_id = -1,
	.slave_alloc = visorhba_slave_alloc,
	.slave_destroy = visorhba_slave_destroy,
	.use_clustering = ENABLE_CLUSTERING,
};
639
/**
 *	info_debugfs_show - debugfs interface to dump visorhba states
 *	@seq: seq_file to print into
 *	@v: unused
 *
 *	This presents a file in the debugfs tree named:
 *	    /visorhba/vbus<x>:dev<y>/info
 *	dumping the counters tracked in visorhba_devdata.
 */
static int info_debugfs_show(struct seq_file *seq, void *v)
{
	struct visorhba_devdata *devdata = seq->private;

	seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
	seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
	seq_printf(seq, "interrupts_disabled = %llu\n",
		   devdata->interrupts_disabled);
	seq_printf(seq, "interrupts_notme = %llu\n",
		   devdata->interrupts_notme);
	seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
	if (devdata->flags_addr) {
		/* __force: deliberately stripping __iomem to compute the
		 * physical address for diagnostic display only
		 */
		u64 phys_flags_addr =
			virt_to_phys((__force  void *)devdata->flags_addr);
		seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
			   phys_flags_addr);
		seq_printf(seq, "FeatureFlags = %llu\n",
			   (__le64)readq(devdata->flags_addr));
	}
	seq_printf(seq, "acquire_failed_cnt = %llu\n",
		   devdata->acquire_failed_cnt);

	return 0;
}
670
/* debugfs open: route reads through info_debugfs_show() via seq_file;
 * i_private carries the devdata pointer set at file creation.
 */
static int info_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, info_debugfs_show, inode->i_private);
}
675
/* file_operations for the debugfs "info" file (standard seq_file plumbing) */
static const struct file_operations info_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = info_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
683
684 /**
685  *      complete_taskmgmt_command - complete task management
686  *      @cmdrsp: Response from the IOVM
687  *
688  *      Service Partition returned the result of the task management
689  *      command. Wake up anyone waiting for it.
690  *      Returns void
691  */
692 static inline void complete_taskmgmt_command
693 (struct idr *idrtable, struct uiscmdrsp *cmdrsp, int result)
694 {
695         wait_queue_head_t *wq =
696                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
697         int *scsi_result_ptr =
698                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
699
700         if (unlikely(!(wq && scsi_result_ptr))) {
701                 pr_err("visorhba: no completion context; cmd will time out\n");
702                 return;
703         }
704
705         /* copy the result of the taskmgmt and
706          * wake up the error handler that is waiting for this
707          */
708         pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
709         *scsi_result_ptr = result;
710         wake_up_all(wq);
711 }
712
/**
 *	visorhba_serverdown_complete - Called when we are done cleaning up
 *				       from serverdown
 *	@devdata: device whose IOVM went down
 *
 *	Called when we are done cleaning up from serverdown: stop the
 *	response-processing thread and fail every still-pending request.
 *	Returns void when finished cleaning up
 */
static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
{
	int i;
	struct scsipending *pendingdel = NULL;
	struct scsi_cmnd *scsicmd = NULL;
	struct uiscmdrsp *cmdrsp;
	unsigned long flags;

	/* Stop using the IOVM response queue (queue should be drained
	 * by the end)
	 */
	visor_thread_stop(devdata->thread);

	/* Fail commands that weren't completed */
	spin_lock_irqsave(&devdata->privlock, flags);
	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
		pendingdel = &devdata->pending[i];
		switch (pendingdel->cmdtype) {
		case CMD_SCSI_TYPE:
			/* fail the SCSI command back to the midlayer */
			scsicmd = pendingdel->sent;
			scsicmd->result = DID_RESET << 16;
			if (scsicmd->scsi_done)
				scsicmd->scsi_done(scsicmd);
			break;
		case CMD_SCSITASKMGMT_TYPE:
			/* wake the error handler waiting on this taskmgmt */
			cmdrsp = pendingdel->sent;
			complete_taskmgmt_command(&devdata->idr, cmdrsp,
						  TASK_MGMT_FAILED);
			break;
		default:
			break;	/* slot not in use */
		}
		/* clear the slot */
		pendingdel->cmdtype = 0;
		pendingdel->sent = NULL;
	}
	spin_unlock_irqrestore(&devdata->privlock, flags);

	devdata->serverdown = true;
	devdata->serverchangingstate = false;
}
762
763 /**
764  *      visorhba_serverdown - Got notified that the IOVM is down
765  *      @devdata: visorhba that is being serviced by downed IOVM.
766  *
767  *      Something happened to the IOVM, return immediately and
768  *      schedule work cleanup work.
769  *      Return SUCCESS or EINVAL
770  */
771 static int visorhba_serverdown(struct visorhba_devdata *devdata)
772 {
773         if (!devdata->serverdown && !devdata->serverchangingstate) {
774                 devdata->serverchangingstate = true;
775                 visorhba_serverdown_complete(devdata);
776         } else if (devdata->serverchangingstate) {
777                 return -EINVAL;
778         }
779         return 0;
780 }
781
782 /**
783  *      do_scsi_linuxstat - scsi command returned linuxstat
784  *      @cmdrsp: response from IOVM
785  *      @scsicmd: Command issued.
786  *
787  *      Don't log errors for disk-not-present inquiries
788  *      Returns void
789  */
790 static void
791 do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
792 {
793         struct visorhba_devdata *devdata;
794         struct visordisk_info *vdisk;
795         struct scsi_device *scsidev;
796
797         scsidev = scsicmd->device;
798         memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
799
800         /* Do not log errors for disk-not-present inquiries */
801         if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
802             (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
803             (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
804                 return;
805         /* Okay see what our error_count is here.... */
806         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
807         for_each_vdisk_match(vdisk, devdata, scsidev) {
808                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
809                         atomic_inc(&vdisk->error_count);
810                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
811                 }
812         }
813 }
814
815 static int set_no_disk_inquiry_result(unsigned char *buf,
816                                       size_t len, bool is_lun0)
817 {
818         if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN)
819                 return -EINVAL;
820         memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
821         buf[2] = SCSI_SPC2_VER;
822         if (is_lun0) {
823                 buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
824                 buf[3] = DEV_HISUPPORT;
825         } else {
826                 buf[0] = DEV_NOT_CAPABLE;
827         }
828         buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
829         strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
830         return 0;
831 }
832
/**
 *	do_scsi_nolinuxstat - scsi command didn't have linuxstat
 *	@cmdrsp: response from IOVM
 *	@scsicmd: Command issued.
 *
 *	Handle response when no linuxstat was returned. A successful
 *	INQUIRY flagged "no disk" gets a synthesized pseudo-device
 *	inquiry payload copied into the command's data buffer; any
 *	other command just decays the disk's I/O-threshold and error
 *	counters.
 *	Returns void
 */
static void
do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
{
	struct scsi_device *scsidev;
	unsigned char *buf;
	struct scatterlist *sg;
	unsigned int i;
	char *this_page;
	char *this_page_orig;
	/* NOTE(review): bufind is never advanced, so every sg entry is
	 * filled from the start of buf -- confirm this is intended.
	 */
	int bufind = 0;
	struct visordisk_info *vdisk;
	struct visorhba_devdata *devdata;

	scsidev = scsicmd->device;
	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
	    (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
		/* no_disk_result == 0 means a real device answered */
		if (cmdrsp->scsi.no_disk_result == 0)
			return;

		/* NOTE(review): fixed 36-byte buffer, but up to
		 * cmdrsp->scsi.bufflen (or sg[i].length) bytes are copied
		 * out of it below -- verify those sizes are capped by the
		 * IOVM, otherwise this over-reads.
		 */
		buf = kzalloc(sizeof(char) * 36, GFP_KERNEL);
		if (!buf)
			return;

		/* Linux scsi code wants a device at Lun 0
		 * to issue report luns, but we don't want
		 * a disk there so we'll present a processor
		 * there.
		 */
		set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
					   scsidev->lun == 0);

		if (scsi_sg_count(scsicmd) == 0) {
			/* no scatter-gather list: buffer is direct */
			memcpy(scsi_sglist(scsicmd), buf,
			       cmdrsp->scsi.bufflen);
			kfree(buf);
			return;
		}

		/* copy the synthesized inquiry data into each sg page;
		 * kmap_atomic requires the memcpy to finish before the
		 * matching kunmap_atomic
		 */
		sg = scsi_sglist(scsicmd);
		for (i = 0; i < scsi_sg_count(scsicmd); i++) {
			this_page_orig = kmap_atomic(sg_page(sg + i));
			this_page = (void *)((unsigned long)this_page_orig |
					     sg[i].offset);
			memcpy(this_page, buf + bufind, sg[i].length);
			kunmap_atomic(this_page_orig);
		}
		kfree(buf);
	} else {
		/* successful non-INQUIRY I/O: decay error accounting */
		devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
		for_each_vdisk_match(vdisk, devdata, scsidev) {
			if (atomic_read(&vdisk->ios_threshold) > 0) {
				atomic_dec(&vdisk->ios_threshold);
				if (atomic_read(&vdisk->ios_threshold) == 0)
					atomic_set(&vdisk->error_count, 0);
			}
		}
	}
}
899
900 /**
901  *      complete_scsi_command - complete a scsi command
902  *      @uiscmdrsp: Response from Service Partition
903  *      @scsicmd: The scsi command
904  *
905  *      Response returned by the Service Partition, finish it and send
906  *      completion to the scsi midlayer.
907  *      Returns void.
908  */
909 static void
910 complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
911 {
912         /* take what we need out of cmdrsp and complete the scsicmd */
913         scsicmd->result = cmdrsp->scsi.linuxstat;
914         if (cmdrsp->scsi.linuxstat)
915                 do_scsi_linuxstat(cmdrsp, scsicmd);
916         else
917                 do_scsi_nolinuxstat(cmdrsp, scsicmd);
918
919         scsicmd->scsi_done(scsicmd);
920 }
921
922 /**
923  *      drain_queue - pull responses out of iochannel
924  *      @cmdrsp: Response from the IOSP
925  *      @devdata: device that owns this iochannel
926  *
927  *      Pulls responses out of the iochannel and process the responses.
928  *      Restuns void
929  */
930 static void
931 drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
932 {
933         struct scsi_cmnd *scsicmd;
934
935         while (1) {
936                 if (visorchannel_signalremove(devdata->dev->visorchannel,
937                                               IOCHAN_FROM_IOPART,
938                                               cmdrsp))
939                         break; /* queue empty */
940
941                 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
942                         /* scsicmd location is returned by the
943                          * deletion
944                          */
945                         scsicmd = del_scsipending_ent(devdata,
946                                                       cmdrsp->scsi.handle);
947                         if (!scsicmd)
948                                 break;
949                         /* complete the orig cmd */
950                         complete_scsi_command(cmdrsp, scsicmd);
951                 } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
952                         if (!del_scsipending_ent(devdata,
953                                                  cmdrsp->scsitaskmgmt.handle))
954                                 break;
955                         complete_taskmgmt_command(&devdata->idr, cmdrsp,
956                                                   cmdrsp->scsitaskmgmt.result);
957                 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
958                         dev_err_once(&devdata->dev->device,
959                                      "ignoring unsupported NOTIFYGUEST\n");
960                 /* cmdrsp is now available for re-use */
961         }
962 }
963
964 /**
965  *      process_incoming_rsps - Process responses from IOSP
966  *      @v: void pointer to visorhba_devdata
967  *
968  *      Main function for the thread that processes the responses
969  *      from the IO Service Partition. When the queue is empty, wait
970  *      to check to see if it is full again.
971  */
972 static int process_incoming_rsps(void *v)
973 {
974         struct visorhba_devdata *devdata = v;
975         struct uiscmdrsp *cmdrsp = NULL;
976         const int size = sizeof(*cmdrsp);
977
978         cmdrsp = kmalloc(size, GFP_ATOMIC);
979         if (!cmdrsp)
980                 return -ENOMEM;
981
982         while (1) {
983                 if (kthread_should_stop())
984                         break;
985                 wait_event_interruptible_timeout(
986                         devdata->rsp_queue, (atomic_read(
987                                              &devdata->interrupt_rcvd) == 1),
988                                 msecs_to_jiffies(devdata->thread_wait_ms));
989                 /* drain queue */
990                 drain_queue(cmdrsp, devdata);
991         }
992         kfree(cmdrsp);
993         return 0;
994 }
995
996 /**
997  *      visorhba_pause - function to handle visorbus pause messages
998  *      @dev: device that is pausing.
999  *      @complete_func: function to call when finished
1000  *
1001  *      Something has happened to the IO Service Partition that is
1002  *      handling this device. Quiet this device and reset commands
1003  *      so that the Service Partition can be corrected.
1004  *      Returns SUCCESS
1005  */
1006 static int visorhba_pause(struct visor_device *dev,
1007                           visorbus_state_complete_func complete_func)
1008 {
1009         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1010
1011         visorhba_serverdown(devdata);
1012         complete_func(dev, 0);
1013         return 0;
1014 }
1015
1016 /**
1017  *      visorhba_resume - function called when the IO Service Partition is back
1018  *      @dev: device that is pausing.
1019  *      @complete_func: function to call when finished
1020  *
1021  *      Yay! The IO Service Partition is back, the channel has been wiped
1022  *      so lets re-establish connection and start processing responses.
1023  *      Returns 0 on success, error on failure.
1024  */
1025 static int visorhba_resume(struct visor_device *dev,
1026                            visorbus_state_complete_func complete_func)
1027 {
1028         struct visorhba_devdata *devdata;
1029
1030         devdata = dev_get_drvdata(&dev->device);
1031         if (!devdata)
1032                 return -EINVAL;
1033
1034         if (devdata->serverdown && !devdata->serverchangingstate)
1035                 devdata->serverchangingstate = true;
1036
1037         devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1038                                              "vhba_incming");
1039
1040         devdata->serverdown = false;
1041         devdata->serverchangingstate = false;
1042
1043         return 0;
1044 }
1045
/**
 *	visorhba_probe - device has been discovered, do acquire
 *	@dev: visor_device that was discovered
 *
 *	A new HBA was discovered, do the initial connections of it:
 *	allocate and register a Scsi_Host sized from the channel's
 *	advertised limits, create debugfs entries, switch the channel
 *	to polling mode, and start the response-draining thread.
 *	Return 0 on success, otherwise error.
 */
static int visorhba_probe(struct visor_device *dev)
{
	struct Scsi_Host *scsihost;
	struct vhba_config_max max;
	struct visorhba_devdata *devdata = NULL;
	int err, channel_offset;
	u64 features;

	scsihost = scsi_host_alloc(&visorhba_driver_template,
				   sizeof(*devdata));
	if (!scsihost)
		return -ENODEV;

	/* pull the size limits the IOVM advertises in its channel */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vhba.max);
	err = visorbus_read_channel(dev, channel_offset, &max,
				    sizeof(struct vhba_config_max));
	if (err < 0)
		goto err_scsi_host_put;

	scsihost->max_id = (unsigned int)max.max_id;
	scsihost->max_lun = (unsigned int)max.max_lun;
	scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
	/* max_io_size is in bytes; max_sectors wants 512-byte sectors */
	scsihost->max_sectors =
	    (unsigned short)(max.max_io_size >> 9);
	scsihost->sg_tablesize =
	    (unsigned short)(max.max_io_size / PAGE_SIZE);
	if (scsihost->sg_tablesize > MAX_PHYS_INFO)
		scsihost->sg_tablesize = MAX_PHYS_INFO;
	err = scsi_add_host(scsihost, &dev->device);
	if (err < 0)
		goto err_scsi_host_put;

	/* devdata lives in the hostdata area of the Scsi_Host allocation */
	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	devdata->dev = dev;
	dev_set_drvdata(&dev->device, devdata);

	devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
						  visorhba_debugfs_dir);
	if (!devdata->debugfs_dir) {
		err = -ENOMEM;
		goto err_scsi_remove_host;
	}
	devdata->debugfs_info =
		debugfs_create_file("info", S_IRUSR | S_IRGRP,
				    devdata->debugfs_dir, devdata,
				    &info_debugfs_fops);
	if (!devdata->debugfs_info) {
		err = -ENOMEM;
		goto err_debugfs_dir;
	}

	init_waitqueue_head(&devdata->rsp_queue);
	spin_lock_init(&devdata->privlock);
	devdata->serverdown = false;
	devdata->serverchangingstate = false;
	devdata->scsihost = scsihost;

	/* tell the IOVM we poll for responses instead of taking
	 * channel interrupts
	 */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  channel_header.features);
	err = visorbus_read_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_debugfs_info;
	features |= ULTRA_IO_CHANNEL_IS_POLLING;
	err = visorbus_write_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_debugfs_info;

	idr_init(&devdata->idr);

	/* start the response thread with a 2 ms poll period.
	 * NOTE(review): visor_thread_start() return value is not
	 * checked -- confirm it cannot fail, or add handling.
	 */
	devdata->thread_wait_ms = 2;
	devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
					     "vhba_incoming");

	scsi_scan_host(scsihost);

	return 0;

err_debugfs_info:
	debugfs_remove(devdata->debugfs_info);

err_debugfs_dir:
	debugfs_remove_recursive(devdata->debugfs_dir);

err_scsi_remove_host:
	scsi_remove_host(scsihost);

err_scsi_host_put:
	scsi_host_put(scsihost);
	return err;
}
1144
1145 /**
1146  *      visorhba_remove - remove a visorhba device
1147  *      @dev: Device to remove
1148  *
1149  *      Removes the visorhba device.
1150  *      Returns void.
1151  */
1152 static void visorhba_remove(struct visor_device *dev)
1153 {
1154         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1155         struct Scsi_Host *scsihost = NULL;
1156
1157         if (!devdata)
1158                 return;
1159
1160         scsihost = devdata->scsihost;
1161         visor_thread_stop(devdata->thread);
1162         scsi_remove_host(scsihost);
1163         scsi_host_put(scsihost);
1164
1165         idr_destroy(&devdata->idr);
1166
1167         dev_set_drvdata(&dev->device, NULL);
1168         debugfs_remove(devdata->debugfs_info);
1169         debugfs_remove_recursive(devdata->debugfs_dir);
1170 }
1171
/* This is used to tell the visor bus driver which types of visor devices
 * we support, and what functions to call when a visor device that we support
 * is attached or removed.
 */
static struct visor_driver visorhba_driver = {
	.name = "visorhba",
	.owner = THIS_MODULE,
	.channel_types = visorhba_channel_types,
	.probe = visorhba_probe,
	.remove = visorhba_remove,
	.pause = visorhba_pause,
	.resume = visorhba_resume,
	/* responses are polled by a kthread, so no interrupt handler */
	.channel_interrupt = NULL,
};
1186
1187 /**
1188  *      visorhba_init           - driver init routine
1189  *
1190  *      Initialize the visorhba driver and register it with visorbus
1191  *      to handle s-Par virtual host bus adapter.
1192  */
1193 static int visorhba_init(void)
1194 {
1195         int rc = -ENOMEM;
1196
1197         visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1198         if (!visorhba_debugfs_dir)
1199                 return -ENOMEM;
1200
1201         rc = visorbus_register_visor_driver(&visorhba_driver);
1202         if (rc)
1203                 goto cleanup_debugfs;
1204
1205         return 0;
1206
1207 cleanup_debugfs:
1208         debugfs_remove_recursive(visorhba_debugfs_dir);
1209
1210         return rc;
1211 }
1212
/**
 *	visorhba_cleanup	- driver exit routine
 *
 *	Unregister driver from the bus and free up memory.
 *	The driver is unregistered first so no device can probe while
 *	the shared debugfs directory is being removed.
 */
static void visorhba_exit(void)
{
	visorbus_unregister_visor_driver(&visorhba_driver);
	debugfs_remove_recursive(visorhba_debugfs_dir);
}
1223
/* module entry/exit points and standard module metadata */
module_init(visorhba_init);
module_exit(visorhba_exit);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");