/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */
16 #include <linux/debugfs.h>
17 #include <linux/skbuff.h>
18 #include <linux/kthread.h>
19 #include <linux/idr.h>
20 #include <linux/seq_file.h>
21 #include <scsi/scsi.h>
22 #include <scsi/scsi_host.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_device.h>
27 #include "iochannel.h"
29 /* The Send and Receive Buffers of the IO Queue may both be full */
31 #define IOS_ERROR_THRESHOLD 1000
/* MAX_BUF = 6 lines x 10 MAXVHBA x 80 characters
 *	   = 4800 bytes ~ 2^13 = 8192 bytes
 */
#define MAX_BUF			8192
36 #define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
37 #define VISORHBA_ERROR_COUNT 30
39 static struct dentry *visorhba_debugfs_dir;
41 /* GUIDS for HBA channel type supported by this driver */
42 static struct visor_channeltype_descriptor visorhba_channel_types[] = {
43 /* Note that the only channel type we expect to be reported by the
44 * bus driver is the SPAR_VHBA channel.
46 { SPAR_VHBA_CHANNEL_PROTOCOL_UUID, "sparvhba" },
47 { NULL_UUID_LE, NULL }
50 MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
51 MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);
53 struct visordisk_info {
55 u32 channel, id, lun; /* Disk Path */
56 atomic_t ios_threshold;
58 struct visordisk_info *next;
62 struct uiscmdrsp cmdrsp;
63 void *sent; /* The Data being tracked */
64 char cmdtype; /* Type of pointer that is being stored */
67 /* Each scsi_host has a host_data area that contains this struct. */
68 struct visorhba_devdata {
69 struct Scsi_Host *scsihost;
70 struct visor_device *dev;
71 struct list_head dev_info_list;
72 /* Tracks the requests that have been forwarded to
73 * the IOVM and haven't returned yet
75 struct scsipending pending[MAX_PENDING_REQUESTS];
76 /* Start search for next pending free slot here */
77 unsigned int nextinsert;
78 spinlock_t privlock; /* lock to protect data in devdata */
80 bool serverchangingstate;
81 unsigned long long acquire_failed_cnt;
82 unsigned long long interrupts_rcvd;
83 unsigned long long interrupts_notme;
84 unsigned long long interrupts_disabled;
85 u64 __iomem *flags_addr;
86 atomic_t interrupt_rcvd;
87 wait_queue_head_t rsp_queue;
88 struct visordisk_info head;
89 unsigned int max_buff_len;
91 struct task_struct *thread;
95 * allows us to pass int handles back-and-forth between us and
96 * iovm, instead of raw pointers
100 struct dentry *debugfs_dir;
101 struct dentry *debugfs_info;
/* Wrapper handed to open() paths that only need the devdata pointer. */
struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};
/* Iterate the per-host disk list, executing the following statement for
 * every visordisk_info whose channel/id/lun triple matches @match.
 * NOTE: expands to a for+if, so a dangling "else" after the user's body
 * would bind to this hidden "if" — always brace the body.
 */
#define for_each_vdisk_match(iter, list, match) \
	for (iter = &list->head; iter->next; iter = iter->next) \
		if ((iter->channel == match->channel) && \
		    (iter->id == match->id) && \
		    (iter->lun == match->lun))
114 * visor_thread_start - starts a thread for the device
115 * @threadfn: Function the thread starts
116 * @thrcontext: Context to pass to the thread, i.e. devdata
117 * @name: string describing name of thread
119 * Starts a thread for the device.
121 * Return the task_struct * denoting the thread on success,
124 static struct task_struct *visor_thread_start
125 (int (*threadfn)(void *), void *thrcontext, char *name)
127 struct task_struct *task;
129 task = kthread_run(threadfn, thrcontext, "%s", name);
131 pr_err("visorbus failed to start thread\n");
/**
 *	visor_thread_stop - stops the thread if it is running
 *	@task: thread to stop, may be NULL (no-op)
 */
static void visor_thread_stop(struct task_struct *task)
{
	if (!task)
		return; /* no thread running */
	kthread_stop(task);
}
148 * add_scsipending_entry - save off io command that is pending in
150 * @devdata: Pointer to devdata
151 * @cmdtype: Specifies the type of command pending
152 * @new: The command to be saved
154 * Saves off the io command that is being handled by the Service
155 * Partition so that it can be handled when it completes. If new is
156 * NULL it is assumed the entry refers only to the cmdrsp.
157 * Returns insert_location where entry was added,
160 static int add_scsipending_entry(struct visorhba_devdata *devdata,
161 char cmdtype, void *new)
164 struct scsipending *entry;
167 spin_lock_irqsave(&devdata->privlock, flags);
168 insert_location = devdata->nextinsert;
169 while (devdata->pending[insert_location].sent) {
170 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
171 if (insert_location == (int)devdata->nextinsert) {
172 spin_unlock_irqrestore(&devdata->privlock, flags);
177 entry = &devdata->pending[insert_location];
178 memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
179 entry->cmdtype = cmdtype;
182 else /* wants to send cmdrsp */
183 entry->sent = &entry->cmdrsp;
184 devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
185 spin_unlock_irqrestore(&devdata->privlock, flags);
187 return insert_location;
191 * del_scsipending_enty - removes an entry from the pending array
192 * @devdata: Device holding the pending array
193 * @del: Entry to remove
195 * Removes the entry pointed at by del and returns it.
196 * Returns the scsipending entry pointed at
198 static void *del_scsipending_ent(struct visorhba_devdata *devdata,
204 if (del >= MAX_PENDING_REQUESTS)
207 spin_lock_irqsave(&devdata->privlock, flags);
208 sent = devdata->pending[del].sent;
210 devdata->pending[del].cmdtype = 0;
211 devdata->pending[del].sent = NULL;
212 spin_unlock_irqrestore(&devdata->privlock, flags);
218 * get_scsipending_cmdrsp - return the cmdrsp stored in a pending entry
219 * #ddata: Device holding the pending array
220 * @ent: Entry that stores the cmdrsp
222 * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
223 * if the "sent" field is not NULL
224 * Returns a pointer to the cmdrsp.
226 static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
229 if (ddata->pending[ent].sent)
230 return &ddata->pending[ent].cmdrsp;
236 * simple_idr_get - associate a provided pointer with an int value
237 * 1 <= value <= INT_MAX, and return this int value;
238 * the pointer value can be obtained later by passing
239 * this int value to idr_find()
240 * @idrtable: the data object maintaining the pointer<-->int mappings
241 * @p: the pointer value to be remembered
242 * @lock: a spinlock used when exclusive access to idrtable is needed
244 static unsigned int simple_idr_get(struct idr *idrtable, void *p,
250 idr_preload(GFP_KERNEL);
251 spin_lock_irqsave(lock, flags);
252 id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
253 spin_unlock_irqrestore(lock, flags);
256 return 0; /* failure */
257 return (unsigned int)(id); /* idr_alloc() guarantees > 0 */
261 * setup_scsitaskmgmt_handles - stash the necessary handles so that the
262 * completion processing logic for a taskmgmt
263 * cmd will be able to find who to wake up
264 * and where to stash the result
266 static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
267 struct uiscmdrsp *cmdrsp,
268 wait_queue_head_t *event, int *result)
270 /* specify the event that has to be triggered when this */
271 /* cmd is complete */
272 cmdrsp->scsitaskmgmt.notify_handle =
273 simple_idr_get(idrtable, event, lock);
274 cmdrsp->scsitaskmgmt.notifyresult_handle =
275 simple_idr_get(idrtable, result, lock);
279 * cleanup_scsitaskmgmt_handles - forget handles created by
280 * setup_scsitaskmgmt_handles()
282 static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
283 struct uiscmdrsp *cmdrsp)
285 if (cmdrsp->scsitaskmgmt.notify_handle)
286 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
287 if (cmdrsp->scsitaskmgmt.notifyresult_handle)
288 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
292 * forward_taskmgmt_command - send taskmegmt command to the Service
294 * @tasktype: Type of taskmgmt command
295 * @scsidev: Scsidev that issued command
297 * Create a cmdrsp packet and send it to the Serivce Partition
298 * that will service this request.
299 * Returns whether the command was queued successfully or not.
301 static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
302 struct scsi_cmnd *scsicmd)
304 struct uiscmdrsp *cmdrsp;
305 struct scsi_device *scsidev = scsicmd->device;
306 struct visorhba_devdata *devdata =
307 (struct visorhba_devdata *)scsidev->host->hostdata;
308 int notifyresult = 0xffff;
309 wait_queue_head_t notifyevent;
312 if (devdata->serverdown || devdata->serverchangingstate)
315 scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
320 cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);
322 init_waitqueue_head(¬ifyevent);
324 /* issue TASK_MGMT_ABORT_TASK */
325 cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
326 setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
327 ¬ifyevent, ¬ifyresult);
329 /* save destination */
330 cmdrsp->scsitaskmgmt.tasktype = tasktype;
331 cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
332 cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
333 cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
334 cmdrsp->scsitaskmgmt.handle = scsicmd_id;
336 dev_dbg(&scsidev->sdev_gendev,
337 "visorhba: initiating type=%d taskmgmt command\n", tasktype);
338 if (visorchannel_signalinsert(devdata->dev->visorchannel,
341 goto err_del_scsipending_ent;
343 /* It can take the Service Partition up to 35 seconds to complete
344 * an IO in some cases, so wait 45 seconds and error out
346 if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
347 msecs_to_jiffies(45000)))
348 goto err_del_scsipending_ent;
350 dev_dbg(&scsidev->sdev_gendev,
351 "visorhba: taskmgmt type=%d success; result=0x%x\n",
352 tasktype, notifyresult);
353 if (tasktype == TASK_MGMT_ABORT_TASK)
354 scsicmd->result = DID_ABORT << 16;
356 scsicmd->result = DID_RESET << 16;
358 scsicmd->scsi_done(scsicmd);
359 cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
362 err_del_scsipending_ent:
363 dev_dbg(&scsidev->sdev_gendev,
364 "visorhba: taskmgmt type=%d not executed\n", tasktype);
365 del_scsipending_ent(devdata, scsicmd_id);
366 cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
371 * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
372 * @scsicmd: The scsicmd that needs aborted
374 * Returns SUCCESS if inserted, failure otherwise
377 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
379 /* issue TASK_MGMT_ABORT_TASK */
380 struct scsi_device *scsidev;
381 struct visordisk_info *vdisk;
382 struct visorhba_devdata *devdata;
384 scsidev = scsicmd->device;
385 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
386 for_each_vdisk_match(vdisk, devdata, scsidev) {
387 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
388 atomic_inc(&vdisk->error_count);
390 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
392 return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd);
396 * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
397 * @scsicmd: The scsicmd that needs aborted
399 * Returns SUCCESS if inserted, failure otherwise
401 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
403 /* issue TASK_MGMT_LUN_RESET */
404 struct scsi_device *scsidev;
405 struct visordisk_info *vdisk;
406 struct visorhba_devdata *devdata;
408 scsidev = scsicmd->device;
409 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
410 for_each_vdisk_match(vdisk, devdata, scsidev) {
411 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
412 atomic_inc(&vdisk->error_count);
414 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
416 return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd);
420 * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
422 * @scsicmd: The scsicmd that needs aborted
426 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
428 struct scsi_device *scsidev;
429 struct visordisk_info *vdisk;
430 struct visorhba_devdata *devdata;
432 scsidev = scsicmd->device;
433 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
434 for_each_vdisk_match(vdisk, devdata, scsidev) {
435 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
436 atomic_inc(&vdisk->error_count);
438 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
440 return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd);
444 * visorhba_host_reset_handler - Not supported
445 * @scsicmd: The scsicmd that needs aborted
447 * Not supported, return SUCCESS
451 visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
453 /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
/**
 *	visorhba_get_info - return the driver identification string
 *	@shp: Scsi host that is requesting information
 *
 *	Returns string with info
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Return version string */
	return "visorhba";
}
470 * visorhba_queue_command_lck -- queues command to the Service Partition
471 * @scsicmd: Command to be queued
472 * @vsiorhba_cmnd_done: Done command to call when scsicmd is returned
474 * Queues to scsicmd to the ServicePartition after converting it to a
475 * uiscmdrsp structure.
477 * Returns success if queued to the Service Partition, otherwise
481 visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
482 void (*visorhba_cmnd_done)(struct scsi_cmnd *))
484 struct uiscmdrsp *cmdrsp;
485 struct scsi_device *scsidev = scsicmd->device;
487 unsigned char *cdb = scsicmd->cmnd;
488 struct Scsi_Host *scsihost = scsidev->host;
490 struct visorhba_devdata *devdata =
491 (struct visorhba_devdata *)scsihost->hostdata;
492 struct scatterlist *sg = NULL;
493 struct scatterlist *sglist = NULL;
495 if (devdata->serverdown || devdata->serverchangingstate)
496 return SCSI_MLQUEUE_DEVICE_BUSY;
498 insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
501 if (insert_location < 0)
502 return SCSI_MLQUEUE_DEVICE_BUSY;
504 cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
506 cmdrsp->cmdtype = CMD_SCSI_TYPE;
507 /* save the pending insertion location. Deletion from pending
508 * will return the scsicmd pointer for completion
510 cmdrsp->scsi.handle = insert_location;
512 /* save done function that we have call when cmd is complete */
513 scsicmd->scsi_done = visorhba_cmnd_done;
514 /* save destination */
515 cmdrsp->scsi.vdest.channel = scsidev->channel;
516 cmdrsp->scsi.vdest.id = scsidev->id;
517 cmdrsp->scsi.vdest.lun = scsidev->lun;
519 cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
520 memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
522 cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
524 /* keep track of the max buffer length so far. */
525 if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
526 devdata->max_buff_len = cmdrsp->scsi.bufflen;
528 if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
529 goto err_del_scsipending_ent;
531 /* convert buffer to phys information */
532 /* buffer is scatterlist - copy it out */
533 sglist = scsi_sglist(scsicmd);
535 for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
536 cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
537 cmdrsp->scsi.gpi_list[i].length = sg->length;
539 cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
541 if (visorchannel_signalinsert(devdata->dev->visorchannel,
544 /* queue must be full and we aren't going to wait */
545 goto err_del_scsipending_ent;
549 err_del_scsipending_ent:
550 del_scsipending_ent(devdata, insert_location);
551 return SCSI_MLQUEUE_DEVICE_BUSY;
/* Pick the locking wrapper appropriate for this kernel: either wrap the
 * _lck variant with DEF_SCSI_QCMD, or alias it directly.
 */
#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(visorhba_queue_command)
#else
#define visorhba_queue_command visorhba_queue_command_lck
#endif
561 * visorhba_slave_alloc - called when new disk is discovered
564 * Create a new visordisk_info structure and add it to our
567 * Returns success when created, otherwise error.
569 static int visorhba_slave_alloc(struct scsi_device *scsidev)
571 /* this is called by the midlayer before scan for new devices --
572 * LLD can alloc any struct & do init if needed.
574 struct visordisk_info *vdisk;
575 struct visordisk_info *tmpvdisk;
576 struct visorhba_devdata *devdata;
577 struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
579 devdata = (struct visorhba_devdata *)scsihost->hostdata;
581 return 0; /* even though we errored, treat as success */
583 for_each_vdisk_match(vdisk, devdata, scsidev)
584 return 0; /* already allocated return success */
586 tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
590 tmpvdisk->channel = scsidev->channel;
591 tmpvdisk->id = scsidev->id;
592 tmpvdisk->lun = scsidev->lun;
593 vdisk->next = tmpvdisk;
598 * visorhba_slave_destroy - disk is going away
599 * @scsidev: scsi device going away
601 * Disk is going away, clean up resources.
604 static void visorhba_slave_destroy(struct scsi_device *scsidev)
606 /* midlevel calls this after device has been quiesced and
607 * before it is to be deleted.
609 struct visordisk_info *vdisk, *delvdisk;
610 struct visorhba_devdata *devdata;
611 struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
613 devdata = (struct visorhba_devdata *)scsihost->hostdata;
614 for_each_vdisk_match(vdisk, devdata, scsidev) {
615 delvdisk = vdisk->next;
616 vdisk->next = delvdisk->next;
622 static struct scsi_host_template visorhba_driver_template = {
623 .name = "Unisys Visor HBA",
624 .info = visorhba_get_info,
625 .queuecommand = visorhba_queue_command,
626 .eh_abort_handler = visorhba_abort_handler,
627 .eh_device_reset_handler = visorhba_device_reset_handler,
628 .eh_bus_reset_handler = visorhba_bus_reset_handler,
629 .eh_host_reset_handler = visorhba_host_reset_handler,
631 #define visorhba_MAX_CMNDS 128
632 .can_queue = visorhba_MAX_CMNDS,
635 .slave_alloc = visorhba_slave_alloc,
636 .slave_destroy = visorhba_slave_destroy,
637 .use_clustering = ENABLE_CLUSTERING,
641 * info_debugfs_show - debugfs interface to dump visorhba states
643 * This presents a file in the debugfs tree named:
644 * /visorhba/vbus<x>:dev<y>/info
646 static int info_debugfs_show(struct seq_file *seq, void *v)
648 struct visorhba_devdata *devdata = seq->private;
650 seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
651 seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
652 seq_printf(seq, "interrupts_disabled = %llu\n",
653 devdata->interrupts_disabled);
654 seq_printf(seq, "interrupts_notme = %llu\n",
655 devdata->interrupts_notme);
656 seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
657 if (devdata->flags_addr) {
658 u64 phys_flags_addr =
659 virt_to_phys((__force void *)devdata->flags_addr);
660 seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
662 seq_printf(seq, "FeatureFlags = %llu\n",
663 (__le64)readq(devdata->flags_addr));
665 seq_printf(seq, "acquire_failed_cnt = %llu\n",
666 devdata->acquire_failed_cnt);
671 static int info_debugfs_open(struct inode *inode, struct file *file)
673 return single_open(file, info_debugfs_show, inode->i_private);
676 static const struct file_operations info_debugfs_fops = {
677 .owner = THIS_MODULE,
678 .open = info_debugfs_open,
681 .release = single_release,
685 * complete_taskmgmt_command - complete task management
686 * @cmdrsp: Response from the IOVM
688 * Service Partition returned the result of the task management
689 * command. Wake up anyone waiting for it.
692 static inline void complete_taskmgmt_command
693 (struct idr *idrtable, struct uiscmdrsp *cmdrsp, int result)
695 wait_queue_head_t *wq =
696 idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
697 int *scsi_result_ptr =
698 idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
700 if (unlikely(!(wq && scsi_result_ptr))) {
701 pr_err("visorhba: no completion context; cmd will time out\n");
705 /* copy the result of the taskmgmt and
706 * wake up the error handler that is waiting for this
708 pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
709 *scsi_result_ptr = result;
714 * visorhba_serverdown_complete - Called when we are done cleaning up
716 * @work: work structure for this serverdown request
718 * Called when we are done cleanning up from serverdown, stop processing
719 * queue, fail pending IOs.
720 * Returns void when finished cleaning up
722 static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
725 struct scsipending *pendingdel = NULL;
726 struct scsi_cmnd *scsicmd = NULL;
727 struct uiscmdrsp *cmdrsp;
730 /* Stop using the IOVM response queue (queue should be drained
733 visor_thread_stop(devdata->thread);
735 /* Fail commands that weren't completed */
736 spin_lock_irqsave(&devdata->privlock, flags);
737 for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
738 pendingdel = &devdata->pending[i];
739 switch (pendingdel->cmdtype) {
741 scsicmd = pendingdel->sent;
742 scsicmd->result = DID_RESET << 16;
743 if (scsicmd->scsi_done)
744 scsicmd->scsi_done(scsicmd);
746 case CMD_SCSITASKMGMT_TYPE:
747 cmdrsp = pendingdel->sent;
748 complete_taskmgmt_command(&devdata->idr, cmdrsp,
754 pendingdel->cmdtype = 0;
755 pendingdel->sent = NULL;
757 spin_unlock_irqrestore(&devdata->privlock, flags);
759 devdata->serverdown = true;
760 devdata->serverchangingstate = false;
764 * visorhba_serverdown - Got notified that the IOVM is down
765 * @devdata: visorhba that is being serviced by downed IOVM.
767 * Something happened to the IOVM, return immediately and
768 * schedule work cleanup work.
769 * Return SUCCESS or EINVAL
771 static int visorhba_serverdown(struct visorhba_devdata *devdata)
773 if (!devdata->serverdown && !devdata->serverchangingstate) {
774 devdata->serverchangingstate = true;
775 visorhba_serverdown_complete(devdata);
776 } else if (devdata->serverchangingstate) {
783 * do_scsi_linuxstat - scsi command returned linuxstat
784 * @cmdrsp: response from IOVM
785 * @scsicmd: Command issued.
787 * Don't log errors for disk-not-present inquiries
791 do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
793 struct visorhba_devdata *devdata;
794 struct visordisk_info *vdisk;
795 struct scsi_device *scsidev;
797 scsidev = scsicmd->device;
798 memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
800 /* Do not log errors for disk-not-present inquiries */
801 if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
802 (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
803 (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
805 /* Okay see what our error_count is here.... */
806 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
807 for_each_vdisk_match(vdisk, devdata, scsidev) {
808 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
809 atomic_inc(&vdisk->error_count);
810 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
815 static int set_no_disk_inquiry_result(unsigned char *buf,
816 size_t len, bool is_lun0)
818 if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN)
820 memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
821 buf[2] = SCSI_SPC2_VER;
823 buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
824 buf[3] = DEV_HISUPPORT;
826 buf[0] = DEV_NOT_CAPABLE;
828 buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
829 strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
834 * do_scsi_nolinuxstat - scsi command didn't have linuxstat
835 * @cmdrsp: response from IOVM
836 * @scsicmd: Command issued.
838 * Handle response when no linuxstat was returned
842 do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
844 struct scsi_device *scsidev;
846 struct scatterlist *sg;
849 char *this_page_orig;
851 struct visordisk_info *vdisk;
852 struct visorhba_devdata *devdata;
854 scsidev = scsicmd->device;
855 if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
856 (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
857 if (cmdrsp->scsi.no_disk_result == 0)
860 buf = kzalloc(sizeof(char) * 36, GFP_KERNEL);
864 /* Linux scsi code wants a device at Lun 0
865 * to issue report luns, but we don't want
866 * a disk there so we'll present a processor
869 set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
872 if (scsi_sg_count(scsicmd) == 0) {
873 memcpy(scsi_sglist(scsicmd), buf,
874 cmdrsp->scsi.bufflen);
879 sg = scsi_sglist(scsicmd);
880 for (i = 0; i < scsi_sg_count(scsicmd); i++) {
881 this_page_orig = kmap_atomic(sg_page(sg + i));
882 this_page = (void *)((unsigned long)this_page_orig |
884 memcpy(this_page, buf + bufind, sg[i].length);
885 kunmap_atomic(this_page_orig);
889 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
890 for_each_vdisk_match(vdisk, devdata, scsidev) {
891 if (atomic_read(&vdisk->ios_threshold) > 0) {
892 atomic_dec(&vdisk->ios_threshold);
893 if (atomic_read(&vdisk->ios_threshold) == 0)
894 atomic_set(&vdisk->error_count, 0);
901 * complete_scsi_command - complete a scsi command
902 * @uiscmdrsp: Response from Service Partition
903 * @scsicmd: The scsi command
905 * Response returned by the Service Partition, finish it and send
906 * completion to the scsi midlayer.
910 complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
912 /* take what we need out of cmdrsp and complete the scsicmd */
913 scsicmd->result = cmdrsp->scsi.linuxstat;
914 if (cmdrsp->scsi.linuxstat)
915 do_scsi_linuxstat(cmdrsp, scsicmd);
917 do_scsi_nolinuxstat(cmdrsp, scsicmd);
919 scsicmd->scsi_done(scsicmd);
923 * drain_queue - pull responses out of iochannel
924 * @cmdrsp: Response from the IOSP
925 * @devdata: device that owns this iochannel
927 * Pulls responses out of the iochannel and process the responses.
931 drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
933 struct scsi_cmnd *scsicmd;
936 if (visorchannel_signalremove(devdata->dev->visorchannel,
939 break; /* queue empty */
941 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
942 /* scsicmd location is returned by the
945 scsicmd = del_scsipending_ent(devdata,
946 cmdrsp->scsi.handle);
949 /* complete the orig cmd */
950 complete_scsi_command(cmdrsp, scsicmd);
951 } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
952 if (!del_scsipending_ent(devdata,
953 cmdrsp->scsitaskmgmt.handle))
955 complete_taskmgmt_command(&devdata->idr, cmdrsp,
956 cmdrsp->scsitaskmgmt.result);
957 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
958 dev_err_once(&devdata->dev->device,
959 "ignoring unsupported NOTIFYGUEST\n");
960 /* cmdrsp is now available for re-use */
965 * process_incoming_rsps - Process responses from IOSP
966 * @v: void pointer to visorhba_devdata
968 * Main function for the thread that processes the responses
969 * from the IO Service Partition. When the queue is empty, wait
970 * to check to see if it is full again.
972 static int process_incoming_rsps(void *v)
974 struct visorhba_devdata *devdata = v;
975 struct uiscmdrsp *cmdrsp = NULL;
976 const int size = sizeof(*cmdrsp);
978 cmdrsp = kmalloc(size, GFP_ATOMIC);
983 if (kthread_should_stop())
985 wait_event_interruptible_timeout(
986 devdata->rsp_queue, (atomic_read(
987 &devdata->interrupt_rcvd) == 1),
988 msecs_to_jiffies(devdata->thread_wait_ms));
990 drain_queue(cmdrsp, devdata);
997 * visorhba_pause - function to handle visorbus pause messages
998 * @dev: device that is pausing.
999 * @complete_func: function to call when finished
1001 * Something has happened to the IO Service Partition that is
1002 * handling this device. Quiet this device and reset commands
1003 * so that the Service Partition can be corrected.
1006 static int visorhba_pause(struct visor_device *dev,
1007 visorbus_state_complete_func complete_func)
1009 struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1011 visorhba_serverdown(devdata);
1012 complete_func(dev, 0);
1017 * visorhba_resume - function called when the IO Service Partition is back
1018 * @dev: device that is pausing.
1019 * @complete_func: function to call when finished
1021 * Yay! The IO Service Partition is back, the channel has been wiped
1022 * so lets re-establish connection and start processing responses.
1023 * Returns 0 on success, error on failure.
1025 static int visorhba_resume(struct visor_device *dev,
1026 visorbus_state_complete_func complete_func)
1028 struct visorhba_devdata *devdata;
1030 devdata = dev_get_drvdata(&dev->device);
1034 if (devdata->serverdown && !devdata->serverchangingstate)
1035 devdata->serverchangingstate = true;
1037 devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1040 devdata->serverdown = false;
1041 devdata->serverchangingstate = false;
1047 * visorhba_probe - device has been discovered, do acquire
1048 * @dev: visor_device that was discovered
1050 * A new HBA was discovered, do the initial connections of it.
1051 * Return 0 on success, otherwise error.
1053 static int visorhba_probe(struct visor_device *dev)
1055 struct Scsi_Host *scsihost;
1056 struct vhba_config_max max;
1057 struct visorhba_devdata *devdata = NULL;
1058 int err, channel_offset;
1061 scsihost = scsi_host_alloc(&visorhba_driver_template,
1066 channel_offset = offsetof(struct spar_io_channel_protocol,
1068 err = visorbus_read_channel(dev, channel_offset, &max,
1069 sizeof(struct vhba_config_max));
1071 goto err_scsi_host_put;
1073 scsihost->max_id = (unsigned int)max.max_id;
1074 scsihost->max_lun = (unsigned int)max.max_lun;
1075 scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
1076 scsihost->max_sectors =
1077 (unsigned short)(max.max_io_size >> 9);
1078 scsihost->sg_tablesize =
1079 (unsigned short)(max.max_io_size / PAGE_SIZE);
1080 if (scsihost->sg_tablesize > MAX_PHYS_INFO)
1081 scsihost->sg_tablesize = MAX_PHYS_INFO;
1082 err = scsi_add_host(scsihost, &dev->device);
1084 goto err_scsi_host_put;
1086 devdata = (struct visorhba_devdata *)scsihost->hostdata;
1088 dev_set_drvdata(&dev->device, devdata);
1090 devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
1091 visorhba_debugfs_dir);
1092 if (!devdata->debugfs_dir) {
1094 goto err_scsi_remove_host;
1096 devdata->debugfs_info =
1097 debugfs_create_file("info", S_IRUSR | S_IRGRP,
1098 devdata->debugfs_dir, devdata,
1099 &info_debugfs_fops);
1100 if (!devdata->debugfs_info) {
1102 goto err_debugfs_dir;
1105 init_waitqueue_head(&devdata->rsp_queue);
1106 spin_lock_init(&devdata->privlock);
1107 devdata->serverdown = false;
1108 devdata->serverchangingstate = false;
1109 devdata->scsihost = scsihost;
1111 channel_offset = offsetof(struct spar_io_channel_protocol,
1112 channel_header.features);
1113 err = visorbus_read_channel(dev, channel_offset, &features, 8);
1115 goto err_debugfs_info;
1116 features |= ULTRA_IO_CHANNEL_IS_POLLING;
1117 err = visorbus_write_channel(dev, channel_offset, &features, 8);
1119 goto err_debugfs_info;
1121 idr_init(&devdata->idr);
1123 devdata->thread_wait_ms = 2;
1124 devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1127 scsi_scan_host(scsihost);
1132 debugfs_remove(devdata->debugfs_info);
1135 debugfs_remove_recursive(devdata->debugfs_dir);
1137 err_scsi_remove_host:
1138 scsi_remove_host(scsihost);
1141 scsi_host_put(scsihost);
1146 * visorhba_remove - remove a visorhba device
1147 * @dev: Device to remove
1149 * Removes the visorhba device.
1152 static void visorhba_remove(struct visor_device *dev)
1154 struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1155 struct Scsi_Host *scsihost = NULL;
1160 scsihost = devdata->scsihost;
1161 visor_thread_stop(devdata->thread);
1162 scsi_remove_host(scsihost);
1163 scsi_host_put(scsihost);
1165 idr_destroy(&devdata->idr);
1167 dev_set_drvdata(&dev->device, NULL);
1168 debugfs_remove(devdata->debugfs_info);
1169 debugfs_remove_recursive(devdata->debugfs_dir);
1172 /* This is used to tell the visor bus driver which types of visor devices
1173 * we support, and what functions to call when a visor device that we support
1174 * is attached or removed.
1176 static struct visor_driver visorhba_driver = {
1178 .owner = THIS_MODULE,
1179 .channel_types = visorhba_channel_types,
1180 .probe = visorhba_probe,
1181 .remove = visorhba_remove,
1182 .pause = visorhba_pause,
1183 .resume = visorhba_resume,
1184 .channel_interrupt = NULL,
1188 * visorhba_init - driver init routine
1190 * Initialize the visorhba driver and register it with visorbus
1191 * to handle s-Par virtual host bus adapter.
1193 static int visorhba_init(void)
1197 visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1198 if (!visorhba_debugfs_dir)
1201 rc = visorbus_register_visor_driver(&visorhba_driver);
1203 goto cleanup_debugfs;
1208 debugfs_remove_recursive(visorhba_debugfs_dir);
1214 * visorhba_cleanup - driver exit routine
1216 * Unregister driver from the bus and free up memory.
1218 static void visorhba_exit(void)
1220 visorbus_unregister_visor_driver(&visorhba_driver);
1221 debugfs_remove_recursive(visorhba_debugfs_dir);
1224 module_init(visorhba_init);
1225 module_exit(visorhba_exit);
1227 MODULE_AUTHOR("Unisys");
1228 MODULE_LICENSE("GPL");
1229 MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");