2 * Copyright (c) 2012 - 2015 UNISYS CORPORATION
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
17 #include <linux/debugfs.h>
18 #include <linux/kthread.h>
19 #include <linux/idr.h>
20 #include <linux/seq_file.h>
21 #include <scsi/scsi.h>
22 #include <scsi/scsi_host.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_device.h>
27 #include "iochannel.h"
29 /* The Send and Receive Buffers of the IO Queue may both be full */
31 #define IOS_ERROR_THRESHOLD 1000
32 #define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
33 #define VISORHBA_ERROR_COUNT 30
35 static struct dentry *visorhba_debugfs_dir;
37 /* GUIDS for HBA channel type supported by this driver */
/* Channel-type table handed to visorbus so it knows which channel
 * GUIDs this driver claims (used by the MODULE_DEVICE_TABLE below).
 */
38 static struct visor_channeltype_descriptor visorhba_channel_types[] = {
39 /* Note that the only channel type we expect to be reported by the
40 * bus driver is the VISOR_VHBA channel.
42 { VISOR_VHBA_CHANNEL_GUID, "sparvhba" },
46 MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
47 MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
/* Per-LUN state stored in scsi_device->hostdata (allocated in
 * visorhba_slave_alloc(), freed in visorhba_slave_destroy()).
 */
49 struct visordisk_info {
50 struct scsi_device *sdev;
/* While ios_threshold is non-zero it is decremented on each good IO;
 * when it reaches zero, error_count is reset (see do_scsi_nolinuxstat()).
 */
52 atomic_t ios_threshold;
54 struct visordisk_info *next;
58 struct uiscmdrsp cmdrsp;
59 /* The Data being tracked */
61 /* Type of pointer that is being stored */
65 /* Each scsi_host has a host_data area that contains this struct. */
66 struct visorhba_devdata {
67 struct Scsi_Host *scsihost;
68 struct visor_device *dev;
69 struct list_head dev_info_list;
70 /* Tracks the requests that have been forwarded to
71 * the IOVM and haven't returned yet
73 struct scsipending pending[MAX_PENDING_REQUESTS];
74 /* Start search for next pending free slot here */
75 unsigned int nextinsert;
76 /* lock to protect data in devdata */
/* NOTE(review): the spinlock and serverdown fields declared between
 * these lines are not visible in this chunk; privlock and serverdown
 * are referenced elsewhere in the file -- verify against the full file.
 */
79 bool serverchangingstate;
/* Statistics dumped by the debugfs "info" file (info_debugfs_show()) */
80 unsigned long long acquire_failed_cnt;
81 unsigned long long interrupts_rcvd;
82 unsigned long long interrupts_notme;
83 unsigned long long interrupts_disabled;
84 u64 __iomem *flags_addr;
/* Set when the IOVM signals a response; rsp_queue is the wait queue the
 * response-processing thread sleeps on (see process_incoming_rsps()).
 */
85 atomic_t interrupt_rcvd;
86 wait_queue_head_t rsp_queue;
87 struct visordisk_info head;
88 unsigned int max_buff_len;
/* Thread that drains responses from the IO channel */
90 struct task_struct *thread;
94 * allows us to pass int handles back-and-forth between us and
95 * iovm, instead of raw pointers
99 struct dentry *debugfs_dir;
100 struct dentry *debugfs_info;
/* Simple wrapper pairing an open device with its devdata. */
103 struct visorhba_devices_open {
104 struct visorhba_devdata *devdata;
108 * visor_thread_start - Starts a thread for the device
109 * @threadfn: Function the thread starts
110 * @thrcontext: Context to pass to the thread, i.e. devdata
111 * @name: String describing name of thread
113 * Starts a thread for the device.
115 * Return: The task_struct * denoting the thread on success,
118 static struct task_struct *visor_thread_start(int (*threadfn)(void *),
119 void *thrcontext, char *name)
121 struct task_struct *task;
/* "%s" keeps a caller-supplied name from being treated as a format string */
123 task = kthread_run(threadfn, thrcontext, "%s", name);
/* NOTE(review): error path; the IS_ERR(task) guard around this pr_err()
 * is not visible in this chunk -- verify against the full file.
 */
125 pr_err("visorbus failed to start thread\n");
132 * visor_thread_stop - Stops the thread if it is running
133 * @task: Description of process to stop
/* Presumably forwards to kthread_stop(); body not visible here -- verify. */
135 static void visor_thread_stop(struct task_struct *task)
141 * add_scsipending_entry - Save off io command that is pending in
143 * @devdata: Pointer to devdata
144 * @cmdtype: Specifies the type of command pending
145 * @new: The command to be saved
147 * Saves off the io command that is being handled by the Service
148 * Partition so that it can be handled when it completes. If new is
149 * NULL it is assumed the entry refers only to the cmdrsp.
151 * Return: Insert_location where entry was added on success,
154 static int add_scsipending_entry(struct visorhba_devdata *devdata,
155 char cmdtype, void *new)
158 struct scsipending *entry;
/* privlock serializes access to the pending[] ring and nextinsert */
161 spin_lock_irqsave(&devdata->privlock, flags);
162 insert_location = devdata->nextinsert;
/* Linear probe for a free slot, wrapping around the ring; if we come
 * back to where we started, every slot is busy and we give up.
 */
163 while (devdata->pending[insert_location].sent) {
164 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
165 if (insert_location == (int)devdata->nextinsert) {
166 spin_unlock_irqrestore(&devdata->privlock, flags);
171 entry = &devdata->pending[insert_location];
172 memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
173 entry->cmdtype = cmdtype;
176 /* wants to send cmdrsp */
/* With new == NULL the slot tracks its own embedded cmdrsp (taskmgmt) */
178 entry->sent = &entry->cmdrsp;
179 devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
180 spin_unlock_irqrestore(&devdata->privlock, flags);
182 return insert_location;
186 * del_scsipending_ent - Removes an entry from the pending array
187 * @devdata: Device holding the pending array
188 * @del: Entry to remove
190 * Removes the entry pointed at by del and returns it.
192 * Return: The scsipending entry pointed to on success, NULL on failure
194 static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
199 if (del >= MAX_PENDING_REQUESTS)
202 spin_lock_irqsave(&devdata->privlock, flags);
203 sent = devdata->pending[del].sent;
204 devdata->pending[del].cmdtype = 0;
205 devdata->pending[del].sent = NULL;
206 spin_unlock_irqrestore(&devdata->privlock, flags);
212 * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry
213 * @ddata: Device holding the pending array
214 * @ent: Entry that stores the cmdrsp
216 * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
217 * if the "sent" field is not NULL.
219 * Return: A pointer to the cmdrsp, NULL on failure
221 static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
/* The embedded cmdrsp is only meaningful while the slot is in use */
224 if (ddata->pending[ent].sent)
225 return &ddata->pending[ent].cmdrsp;
231 * simple_idr_get - Associate a provided pointer with an int value
232 * 1 <= value <= INT_MAX, and return this int value;
233 * the pointer value can be obtained later by passing
234 * this int value to idr_find()
235 * @idrtable: The data object maintaining the pointer<-->int mappings
236 * @p: The pointer value to be remembered
237 * @lock: A spinlock used when exclusive access to idrtable is needed
239 * Return: The id number mapped to pointer 'p', 0 on failure
241 static unsigned int simple_idr_get(struct idr *idrtable, void *p,
/* idr_preload()/GFP_NOWAIT pairing lets idr_alloc() run safely while we
 * hold the caller's irq-disabling spinlock.
 */
247 idr_preload(GFP_KERNEL);
248 spin_lock_irqsave(lock, flags);
249 idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
250 spin_unlock_irqrestore(lock, flags);
255 /* idr_alloc() guarantees > 0 */
256 return (unsigned int)(id);
260 * setup_scsitaskmgmt_handles - Stash the necessary handles so that the
261 * completion processing logic for a taskmgmt
262 * cmd will be able to find who to wake up
263 * and where to stash the result
264 * @idrtable: The data object maintaining the pointer<-->int mappings
265 * @lock: A spinlock used when exclusive access to idrtable is needed
266 * @cmdrsp: Response from the IOVM
267 * @event: The event handle to associate with an id
268 * @result: The location to place the result of the event handle into
270 static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
271 struct uiscmdrsp *cmdrsp,
272 wait_queue_head_t *event, int *result)
274 /* specify the event that has to be triggered when this */
275 /* cmd is complete */
/* Only integer handles cross the IOVM boundary; the idr maps them back
 * to the waiter's wait queue and result slot on completion.
 */
276 cmdrsp->scsitaskmgmt.notify_handle =
277 simple_idr_get(idrtable, event, lock);
278 cmdrsp->scsitaskmgmt.notifyresult_handle =
279 simple_idr_get(idrtable, result, lock);
283 * cleanup_scsitaskmgmt_handles - Forget handles created by
284 * setup_scsitaskmgmt_handles()
285 * @idrtable: The data object maintaining the pointer<-->int mappings
286 * @cmdrsp: Response from the IOVM
288 static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
289 struct uiscmdrsp *cmdrsp)
/* A handle of 0 means simple_idr_get() failed, so there is nothing
 * to remove (idr ids here start at 1).
 */
291 if (cmdrsp->scsitaskmgmt.notify_handle)
292 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
293 if (cmdrsp->scsitaskmgmt.notifyresult_handle)
294 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
298 * forward_taskmgmt_command - Send taskmegmt command to the Service
300 * @tasktype: Type of taskmgmt command
301 * @scsidev: Scsidev that issued command
303 * Create a cmdrsp packet and send it to the Serivce Partition
304 * that will service this request.
306 * Return: Int representing whether command was queued successfully or not
308 static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
309 struct scsi_device *scsidev)
311 struct uiscmdrsp *cmdrsp;
312 struct visorhba_devdata *devdata =
313 (struct visorhba_devdata *)scsidev->host->hostdata;
/* 0xffff is the "not yet completed" sentinel the waiter polls against */
314 int notifyresult = 0xffff;
315 wait_queue_head_t notifyevent;
/* Refuse new work while the IOVM is down or transitioning */
318 if (devdata->serverdown || devdata->serverchangingstate)
321 scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
326 cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);
328 init_waitqueue_head(&notifyevent);
330 /* issue TASK_MGMT_ABORT_TASK */
331 cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
/* Publish int handles for our wait queue and result so the completion
 * path (complete_taskmgmt_command()) can find and wake us.
 */
332 setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
333 &notifyevent, &notifyresult);
335 /* save destination */
336 cmdrsp->scsitaskmgmt.tasktype = tasktype;
337 cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
338 cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
339 cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
340 cmdrsp->scsitaskmgmt.handle = scsicmd_id;
342 dev_dbg(&scsidev->sdev_gendev,
343 "visorhba: initiating type=%d taskmgmt command\n", tasktype);
344 if (visorchannel_signalinsert(devdata->dev->visorchannel,
347 goto err_del_scsipending_ent;
349 /* It can take the Service Partition up to 35 seconds to complete
350 * an IO in some cases, so wait 45 seconds and error out
352 if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
353 msecs_to_jiffies(45000)))
354 goto err_del_scsipending_ent;
356 dev_dbg(&scsidev->sdev_gendev,
357 "visorhba: taskmgmt type=%d success; result=0x%x\n",
358 tasktype, notifyresult);
359 cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
362 err_del_scsipending_ent:
363 dev_dbg(&scsidev->sdev_gendev,
364 "visorhba: taskmgmt type=%d not executed\n", tasktype);
/* On failure, reclaim the pending slot and forget the idr handles */
365 del_scsipending_ent(devdata, scsicmd_id);
366 cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
371 * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
372 * @scsicmd: The scsicmd that needs aborted
374 * Return: SUCCESS if inserted, FAILED otherwise
376 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
378 /* issue TASK_MGMT_ABORT_TASK */
379 struct scsi_device *scsidev;
380 struct visordisk_info *vdisk;
383 scsidev = scsicmd->device;
384 vdisk = scsidev->hostdata;
/* Saturating error count; also arms the ios_threshold countdown that
 * eventually clears error_count after enough clean IOs.
 */
385 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
386 atomic_inc(&vdisk->error_count);
388 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
389 rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
390 if (rtn == SUCCESS) {
/* Abort succeeded: complete the command back to the midlayer */
391 scsicmd->result = DID_ABORT << 16;
392 scsicmd->scsi_done(scsicmd);
398 * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
399 * @scsicmd: The scsicmd that needs aborted
401 * Return: SUCCESS if inserted, FAILED otherwise
403 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
405 /* issue TASK_MGMT_LUN_RESET */
406 struct scsi_device *scsidev;
407 struct visordisk_info *vdisk;
410 scsidev = scsicmd->device;
411 vdisk = scsidev->hostdata;
/* Same error-throttle bookkeeping as the abort handler */
412 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
413 atomic_inc(&vdisk->error_count);
415 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
416 rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
417 if (rtn == SUCCESS) {
418 scsicmd->result = DID_RESET << 16;
419 scsicmd->scsi_done(scsicmd);
425 * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
427 * @scsicmd: The scsicmd that needs aborted
429 * Return: SUCCESS if inserted, FAILED otherwise
431 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
433 struct scsi_device *scsidev;
434 struct visordisk_info *vdisk;
437 scsidev = scsicmd->device;
/* Mark every device on this host as erroring before the bus reset */
438 shost_for_each_device(scsidev, scsidev->host) {
439 vdisk = scsidev->hostdata;
440 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
441 atomic_inc(&vdisk->error_count);
443 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
/* NOTE(review): scsidev is reused as the loop cursor above; after the
 * loop it is no longer the command's own device -- verify intent.
 */
445 rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
446 if (rtn == SUCCESS) {
447 scsicmd->result = DID_RESET << 16;
448 scsicmd->scsi_done(scsicmd);
454 * visorhba_host_reset_handler - Not supported
455 * @scsicmd: The scsicmd that needs to be aborted
457 * Return: Not supported, return SUCCESS
/* Host reset is not implemented; per the kernel-doc above it reports
 * SUCCESS without doing anything.
 */
459 static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
461 /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
466 * visorhba_get_info - Get information about SCSI device
467 * @shp: Scsi host that is requesting information
469 * Return: String with visorhba information
471 static const char *visorhba_get_info(struct Scsi_Host *shp)
473 /* Return version string */
478 * dma_data_dir_linux_to_spar - convert dma_data_direction value to
479 * Unisys-specific equivalent
480 * @d: dma direction value to convert
482 * Returns the Unisys-specific dma direction value corresponding to @d
484 static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
/* Straight 1:1 mapping from Linux dma_data_direction to the s-Par
 * UIS_DMA_* wire values carried in the cmdrsp.
 */
487 case DMA_BIDIRECTIONAL:
488 return UIS_DMA_BIDIRECTIONAL;
490 return UIS_DMA_TO_DEVICE;
491 case DMA_FROM_DEVICE:
492 return UIS_DMA_FROM_DEVICE;
501 * visorhba_queue_command_lck - Queues command to the Service Partition
502 * @scsicmd: Command to be queued
503 * @vsiorhba_cmnd_done: Done command to call when scsicmd is returned
505 * Queues to scsicmd to the ServicePartition after converting it to a
506 * uiscmdrsp structure.
508 * Return: 0 if successfully queued to the Service Partition, otherwise
511 static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
512 void (*visorhba_cmnd_done)
513 (struct scsi_cmnd *))
515 struct uiscmdrsp *cmdrsp;
516 struct scsi_device *scsidev = scsicmd->device;
518 unsigned char *cdb = scsicmd->cmnd;
519 struct Scsi_Host *scsihost = scsidev->host;
521 struct visorhba_devdata *devdata =
522 (struct visorhba_devdata *)scsihost->hostdata;
523 struct scatterlist *sg = NULL;
524 struct scatterlist *sglist = NULL;
/* Push back on the midlayer while the IOVM is down or transitioning */
526 if (devdata->serverdown || devdata->serverchangingstate)
527 return SCSI_MLQUEUE_DEVICE_BUSY;
529 insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
/* Negative return means the pending ring is full: ask midlayer to retry */
531 if (insert_location < 0)
532 return SCSI_MLQUEUE_DEVICE_BUSY;
534 cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
535 cmdrsp->cmdtype = CMD_SCSI_TYPE;
536 /* save the pending insertion location. Deletion from pending
537 * will return the scsicmd pointer for completion
539 cmdrsp->scsi.handle = insert_location;
541 /* save done function that we have call when cmd is complete */
542 scsicmd->scsi_done = visorhba_cmnd_done;
543 /* save destination */
544 cmdrsp->scsi.vdest.channel = scsidev->channel;
545 cmdrsp->scsi.vdest.id = scsidev->id;
546 cmdrsp->scsi.vdest.lun = scsidev->lun;
548 cmdrsp->scsi.data_dir =
549 dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
550 memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
551 cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
553 /* keep track of the max buffer length so far. */
554 if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
555 devdata->max_buff_len = cmdrsp->scsi.bufflen;
/* The cmdrsp gpi_list can only describe MAX_PHYS_INFO segments */
557 if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
558 goto err_del_scsipending_ent;
560 /* convert buffer to phys information */
561 /* buffer is scatterlist - copy it out */
562 sglist = scsi_sglist(scsicmd);
/* Translate each SG segment into a (phys addr, length) pair for the IOVM */
564 for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
565 cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
566 cmdrsp->scsi.gpi_list[i].length = sg->length;
568 cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
570 if (visorchannel_signalinsert(devdata->dev->visorchannel,
573 /* queue must be full and we aren't going to wait */
574 goto err_del_scsipending_ent;
/* Failure path: release the pending slot we reserved above */
578 err_del_scsipending_ent:
579 del_scsipending_ent(devdata, insert_location);
580 return SCSI_MLQUEUE_DEVICE_BUSY;
/* NOTE(review): upstream wraps these two lines in
 * #ifdef DEF_SCSI_QCMD / #else / #endif (the macro wrapper when the
 * kernel provides DEF_SCSI_QCMD, a plain alias otherwise); the
 * conditional lines are not visible in this chunk -- as written both
 * definitions would be active. Verify against the full file.
 */
584 static DEF_SCSI_QCMD(visorhba_queue_command)
586 #define visorhba_queue_command visorhba_queue_command_lck
590 * visorhba_slave_alloc - Called when new disk is discovered
593 * Create a new visordisk_info structure and add it to our
596 * Return: 0 on success, -ENOMEM on failure.
598 static int visorhba_slave_alloc(struct scsi_device *scsidev)
600 /* this is called by the midlayer before scan for new devices --
601 * LLD can alloc any struct & do init if needed.
603 struct visordisk_info *vdisk;
604 struct visorhba_devdata *devdata;
605 struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
607 /* already allocated return success */
608 if (scsidev->hostdata)
611 /* even though we errored, treat as success */
612 devdata = (struct visorhba_devdata *)scsihost->hostdata;
/* GFP_ATOMIC: this midlayer callback context must not sleep */
616 vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
620 vdisk->sdev = scsidev;
621 scsidev->hostdata = vdisk;
626 * visorhba_slave_destroy - Disk is going away, clean up resources.
627 * @scsidev: Scsi device to destroy
629 static void visorhba_slave_destroy(struct scsi_device *scsidev)
631 /* midlevel calls this after device has been quiesced and
632 * before it is to be deleted.
634 struct visordisk_info *vdisk;
/* Detach our per-LUN state from the scsi_device before freeing it */
636 vdisk = scsidev->hostdata;
637 scsidev->hostdata = NULL;
/* SCSI host template: wires the midlayer entry points to the handlers
 * defined above.
 */
641 static struct scsi_host_template visorhba_driver_template = {
642 .name = "Unisys Visor HBA",
643 .info = visorhba_get_info,
644 .queuecommand = visorhba_queue_command,
645 .eh_abort_handler = visorhba_abort_handler,
646 .eh_device_reset_handler = visorhba_device_reset_handler,
647 .eh_bus_reset_handler = visorhba_bus_reset_handler,
648 .eh_host_reset_handler = visorhba_host_reset_handler,
650 #define visorhba_MAX_CMNDS 128
651 .can_queue = visorhba_MAX_CMNDS,
654 .slave_alloc = visorhba_slave_alloc,
655 .slave_destroy = visorhba_slave_destroy,
656 .use_clustering = ENABLE_CLUSTERING,
660 * info_debugfs_show - Debugfs interface to dump visorhba states
661 * @seq: The sequence file to write information to
662 * @v: Unused, but needed for use with seq file single_open invocation
664 * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info.
668 static int info_debugfs_show(struct seq_file *seq, void *v)
670 struct visorhba_devdata *devdata = seq->private;
/* Dump the counters accumulated in visorhba_devdata */
672 seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
673 seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
674 seq_printf(seq, "interrupts_disabled = %llu\n",
675 devdata->interrupts_disabled);
676 seq_printf(seq, "interrupts_notme = %llu\n",
677 devdata->interrupts_notme);
678 seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
679 if (devdata->flags_addr) {
/* __force cast strips __iomem so virt_to_phys() accepts the pointer */
680 u64 phys_flags_addr =
681 virt_to_phys((__force void *)devdata->flags_addr);
682 seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
684 seq_printf(seq, "FeatureFlags = %llu\n",
685 (u64)readq(devdata->flags_addr));
687 seq_printf(seq, "acquire_failed_cnt = %llu\n",
688 devdata->acquire_failed_cnt);
/* Standard seq_file single_open() shim for the debugfs "info" file */
693 static int info_debugfs_open(struct inode *inode, struct file *file)
695 return single_open(file, info_debugfs_show, inode->i_private);
698 static const struct file_operations info_debugfs_fops = {
699 .owner = THIS_MODULE,
700 .open = info_debugfs_open,
/* NOTE(review): the .read/.llseek seq_file entries between these lines
 * are not visible in this chunk -- verify against the full file.
 */
703 .release = single_release,
707 * complete_taskmgmt_command - Complete task management
708 * @idrtable: The data object maintaining the pointer<-->int mappings
709 * @cmdrsp: Response from the IOVM
710 * @result: The result of the task management command
712 * Service Partition returned the result of the task management
713 * command. Wake up anyone waiting for it.
715 static void complete_taskmgmt_command(struct idr *idrtable,
716 struct uiscmdrsp *cmdrsp, int result)
/* Map the int handles from the IOVM response back to the waiter's
 * wait queue and result slot (set up in setup_scsitaskmgmt_handles()).
 */
718 wait_queue_head_t *wq =
719 idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
720 int *scsi_result_ptr =
721 idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
722 if (unlikely(!(wq && scsi_result_ptr))) {
723 pr_err("visorhba: no completion context; cmd will time out\n");
727 /* copy the result of the taskmgmt and
728 * wake up the error handler that is waiting for this
730 pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
731 *scsi_result_ptr = result;
736 * visorhba_serverdown_complete - Called when we are done cleaning up
738 * @devdata: Visorhba instance on which to complete serverdown
740 * Called when we are done cleanning up from serverdown, stop processing
741 * queue, fail pending IOs.
743 static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
746 struct scsipending *pendingdel = NULL;
747 struct scsi_cmnd *scsicmd = NULL;
748 struct uiscmdrsp *cmdrsp;
751 /* Stop using the IOVM response queue (queue should be drained
754 visor_thread_stop(devdata->thread);
756 /* Fail commands that weren't completed */
757 spin_lock_irqsave(&devdata->privlock, flags);
/* Walk every pending slot and complete it with a reset status so the
 * midlayer can recover; taskmgmt waiters are woken via their handles.
 */
758 for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
759 pendingdel = &devdata->pending[i];
760 switch (pendingdel->cmdtype) {
762 scsicmd = pendingdel->sent;
763 scsicmd->result = DID_RESET << 16;
764 if (scsicmd->scsi_done)
765 scsicmd->scsi_done(scsicmd);
767 case CMD_SCSITASKMGMT_TYPE:
768 cmdrsp = pendingdel->sent;
769 complete_taskmgmt_command(&devdata->idr, cmdrsp,
775 pendingdel->cmdtype = 0;
776 pendingdel->sent = NULL;
778 spin_unlock_irqrestore(&devdata->privlock, flags);
/* Transition complete: we are now definitively down */
780 devdata->serverdown = true;
781 devdata->serverchangingstate = false;
785 * visorhba_serverdown - Got notified that the IOVM is down
786 * @devdata: Visorhba that is being serviced by downed IOVM
788 * Something happened to the IOVM, return immediately and
789 * schedule cleanup work.
791 * Return: 0 on success, -EINVAL on failure
793 static int visorhba_serverdown(struct visorhba_devdata *devdata)
/* Only start a teardown if one isn't already done or in progress */
795 if (!devdata->serverdown && !devdata->serverchangingstate) {
796 devdata->serverchangingstate = true;
797 visorhba_serverdown_complete(devdata);
798 } else if (devdata->serverchangingstate) {
805 * do_scsi_linuxstat - Scsi command returned linuxstat
806 * @cmdrsp: Response from IOVM
807 * @scsicmd: Command issued
809 * Don't log errors for disk-not-present inquiries.
811 static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
812 struct scsi_cmnd *scsicmd)
814 struct visordisk_info *vdisk;
815 struct scsi_device *scsidev;
817 scsidev = scsicmd->device;
/* Propagate the sense data returned by the Service Partition */
818 memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
820 /* Do not log errors for disk-not-present inquiries */
821 if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
822 (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
823 (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
825 /* Okay see what our error_count is here.... */
826 vdisk = scsidev->hostdata;
/* Count the error and (re)arm the clean-IO countdown that clears it */
827 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
828 atomic_inc(&vdisk->error_count);
829 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
/* Synthesize an INQUIRY response describing a not-present pseudo device
 * (used so LUN 0 exists for REPORT LUNS without presenting a disk).
 */
833 static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
/* Caller's buffer must hold the full fixed-size inquiry result */
836 if (len < NO_DISK_INQUIRY_RESULT_LEN)
838 memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN)
839 buf[2] = SCSI_SPC2_VER;
841 buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
842 buf[3] = DEV_HISUPPORT;
844 buf[0] = DEV_NOT_CAPABLE;
846 buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
/* strncpy zero-pads the remainder; buf was already zeroed above */
847 strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
852 * do_scsi_nolinuxstat - Scsi command didn't have linuxstat
853 * @cmdrsp: Response from IOVM
854 * @scsicmd: Command issued
856 * Handle response when no linuxstat was returned.
858 static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
859 struct scsi_cmnd *scsicmd)
861 struct scsi_device *scsidev;
863 struct scatterlist *sg;
866 char *this_page_orig;
868 struct visordisk_info *vdisk;
870 scsidev = scsicmd->device;
/* Only INQUIRY needs special handling: fabricate a "no disk" answer */
871 if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
872 (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
873 if (cmdrsp->scsi.no_disk_result == 0)
876 buf = kzalloc(sizeof(char) * 36, GFP_KERNEL);
880 /* Linux scsi code wants a device at Lun 0
881 * to issue report luns, but we don't want
882 * a disk there so we'll present a processor
885 set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
/* No scatterlist: the data buffer is directly addressable */
888 if (scsi_sg_count(scsicmd) == 0) {
889 memcpy(scsi_sglist(scsicmd), buf,
890 cmdrsp->scsi.bufflen);
/* Otherwise copy the synthetic result into each SG page */
895 sg = scsi_sglist(scsicmd);
896 for (i = 0; i < scsi_sg_count(scsicmd); i++) {
897 this_page_orig = kmap_atomic(sg_page(sg + i));
898 this_page = (void *)((unsigned long)this_page_orig |
/* NOTE(review): bufind is never advanced in this loop, so every
 * segment copies from the start of buf -- verify against the full
 * file (upstream later replaced this with scsi_sg_copy_from_buffer).
 */
900 memcpy(this_page, buf + bufind, sg[i].length);
901 kunmap_atomic(this_page_orig);
905 vdisk = scsidev->hostdata;
/* A clean IO ticks the countdown; at zero, forgive past errors */
906 if (atomic_read(&vdisk->ios_threshold) > 0) {
907 atomic_dec(&vdisk->ios_threshold);
908 if (atomic_read(&vdisk->ios_threshold) == 0)
909 atomic_set(&vdisk->error_count, 0);
915 * complete_scsi_command - Complete a scsi command
916 * @uiscmdrsp: Response from Service Partition
917 * @scsicmd: The scsi command
919 * Response was returned by the Service Partition. Finish it and send
920 * completion to the scsi midlayer.
922 static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
923 struct scsi_cmnd *scsicmd)
925 /* take what we need out of cmdrsp and complete the scsicmd */
926 scsicmd->result = cmdrsp->scsi.linuxstat;
/* Nonzero linuxstat carries sense/error info; zero means success path */
927 if (cmdrsp->scsi.linuxstat)
928 do_scsi_linuxstat(cmdrsp, scsicmd);
930 do_scsi_nolinuxstat(cmdrsp, scsicmd);
/* Hand the command back to the SCSI midlayer */
932 scsicmd->scsi_done(scsicmd);
936 * drain_queue - Pull responses out of iochannel
937 * @cmdrsp: Response from the IOSP
938 * @devdata: Device that owns this iochannel
940 * Pulls responses out of the iochannel and process the responses.
942 static void drain_queue(struct uiscmdrsp *cmdrsp,
943 struct visorhba_devdata *devdata)
945 struct scsi_cmnd *scsicmd;
/* Pop responses until the channel's IOVM-to-guest queue is empty */
949 if (visorchannel_signalremove(devdata->dev->visorchannel,
953 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
954 /* scsicmd location is returned by the
/* del_scsipending_ent() gives back the scsi_cmnd stored at insert time */
957 scsicmd = del_scsipending_ent(devdata,
958 cmdrsp->scsi.handle);
961 /* complete the orig cmd */
962 complete_scsi_command(cmdrsp, scsicmd);
963 } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
/* Entry already reclaimed (e.g. waiter timed out): drop the response */
964 if (!del_scsipending_ent(devdata,
965 cmdrsp->scsitaskmgmt.handle))
967 complete_taskmgmt_command(&devdata->idr, cmdrsp,
968 cmdrsp->scsitaskmgmt.result);
969 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
970 dev_err_once(&devdata->dev->device,
971 "ignoring unsupported NOTIFYGUEST\n");
972 /* cmdrsp is now available for re-use */
977 * process_incoming_rsps - Process responses from IOSP
978 * @v: Void pointer to visorhba_devdata
980 * Main function for the thread that processes the responses
981 * from the IO Service Partition. When the queue is empty, wait
982 * to check to see if it is full again.
984 * Return: 0 on success, -ENOMEM on failure
986 static int process_incoming_rsps(void *v)
988 struct visorhba_devdata *devdata = v;
989 struct uiscmdrsp *cmdrsp = NULL;
990 const int size = sizeof(*cmdrsp);
/* NOTE(review): GFP_ATOMIC in kthread context; GFP_KERNEL would be
 * sufficient here -- review.
 */
992 cmdrsp = kmalloc(size, GFP_ATOMIC);
/* Poll loop: wake on signal or every thread_wait_ms and drain the queue */
997 if (kthread_should_stop())
999 wait_event_interruptible_timeout(
1000 devdata->rsp_queue, (atomic_read(
1001 &devdata->interrupt_rcvd) == 1),
1002 msecs_to_jiffies(devdata->thread_wait_ms));
1004 drain_queue(cmdrsp, devdata);
1011 * visorhba_pause - Function to handle visorbus pause messages
1012 * @dev: Device that is pausing
1013 * @complete_func: Function to call when finished
1015 * Something has happened to the IO Service Partition that is
1016 * handling this device. Quiet this device and reset commands
1017 * so that the Service Partition can be corrected.
1021 static int visorhba_pause(struct visor_device *dev,
1022 visorbus_state_complete_func complete_func)
1024 struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
/* Quiesce: fail pending IOs and stop the response thread, then ack */
1026 visorhba_serverdown(devdata);
1027 complete_func(dev, 0);
1032 * visorhba_resume - Function called when the IO Service Partition is back
1033 * @dev: Device that is pausing
1034 * @complete_func: Function to call when finished
1036 * Yay! The IO Service Partition is back, the channel has been wiped
1037 * so lets re-establish connection and start processing responses.
1039 * Return: 0 on success, -EINVAL on failure
1041 static int visorhba_resume(struct visor_device *dev,
1042 visorbus_state_complete_func complete_func)
1044 struct visorhba_devdata *devdata;
1046 devdata = dev_get_drvdata(&dev->device);
/* Only resume a device that actually went through serverdown */
1050 if (devdata->serverdown && !devdata->serverchangingstate)
1051 devdata->serverchangingstate = true;
/* Restart the response-draining thread before declaring ourselves up */
1053 devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1055 devdata->serverdown = false;
1056 devdata->serverchangingstate = false;
1062 * visorhba_probe - Device has been discovered; do acquire
1063 * @dev: visor_device that was discovered
1065 * A new HBA was discovered; do the initial connections of it.
1067 * Return: 0 on success, otherwise error code
1069 static int visorhba_probe(struct visor_device *dev)
1071 struct Scsi_Host *scsihost;
1072 struct vhba_config_max max;
1073 struct visorhba_devdata *devdata = NULL;
1074 int err, channel_offset;
1077 scsihost = scsi_host_alloc(&visorhba_driver_template,
/* Read the IOVM-provided limits (ids, luns, io size) from the channel */
1082 channel_offset = offsetof(struct visor_io_channel, vhba.max);
1083 err = visorbus_read_channel(dev, channel_offset, &max,
1084 sizeof(struct vhba_config_max));
1086 goto err_scsi_host_put;
1088 scsihost->max_id = (unsigned int)max.max_id;
1089 scsihost->max_lun = (unsigned int)max.max_lun;
1090 scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
/* >> 9: convert a byte count into 512-byte sectors */
1091 scsihost->max_sectors =
1092 (unsigned short)(max.max_io_size >> 9);
1093 scsihost->sg_tablesize =
1094 (unsigned short)(max.max_io_size / PAGE_SIZE);
/* Clamp to what a cmdrsp gpi_list can describe */
1095 if (scsihost->sg_tablesize > MAX_PHYS_INFO)
1096 scsihost->sg_tablesize = MAX_PHYS_INFO;
1097 err = scsi_add_host(scsihost, &dev->device);
1099 goto err_scsi_host_put;
1101 devdata = (struct visorhba_devdata *)scsihost->hostdata;
1103 dev_set_drvdata(&dev->device, devdata);
/* Per-device debugfs dir: /sys/kernel/debug/visorhba/<dev>/info */
1105 devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
1106 visorhba_debugfs_dir);
1107 if (!devdata->debugfs_dir) {
1109 goto err_scsi_remove_host;
1111 devdata->debugfs_info =
1112 debugfs_create_file("info", 0440,
1113 devdata->debugfs_dir, devdata,
1114 &info_debugfs_fops);
1115 if (!devdata->debugfs_info) {
1117 goto err_debugfs_dir;
1120 init_waitqueue_head(&devdata->rsp_queue);
1121 spin_lock_init(&devdata->privlock);
1122 devdata->serverdown = false;
1123 devdata->serverchangingstate = false;
1124 devdata->scsihost = scsihost;
/* Advertise polling mode to the IOVM via the channel feature flags */
1126 channel_offset = offsetof(struct visor_io_channel,
1127 channel_header.features);
1128 err = visorbus_read_channel(dev, channel_offset, &features, 8);
1130 goto err_debugfs_info;
1131 features |= VISOR_CHANNEL_IS_POLLING;
1132 err = visorbus_write_channel(dev, channel_offset, &features, 8);
1134 goto err_debugfs_info;
1136 idr_init(&devdata->idr);
1138 devdata->thread_wait_ms = 2;
1139 devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1142 scsi_scan_host(scsihost);
/* Unwind in reverse order of setup on any failure above */
1147 debugfs_remove(devdata->debugfs_info);
1150 debugfs_remove_recursive(devdata->debugfs_dir);
1152 err_scsi_remove_host:
1153 scsi_remove_host(scsihost);
1156 scsi_host_put(scsihost);
1161 * visorhba_remove - Remove a visorhba device
1162 * @dev: Device to remove
1164 * Removes the visorhba device.
1166 static void visorhba_remove(struct visor_device *dev)
1168 struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1169 struct Scsi_Host *scsihost = NULL;
1174 scsihost = devdata->scsihost;
/* Stop the response thread before tearing down the host it feeds */
1175 visor_thread_stop(devdata->thread);
1176 scsi_remove_host(scsihost);
1177 scsi_host_put(scsihost);
1179 idr_destroy(&devdata->idr);
1181 dev_set_drvdata(&dev->device, NULL);
1182 debugfs_remove(devdata->debugfs_info);
1183 debugfs_remove_recursive(devdata->debugfs_dir);
1186 /* This is used to tell the visorbus driver which types of visor devices
1187 * we support, and what functions to call when a visor device that we support
1188 * is attached or removed.
1190 static struct visor_driver visorhba_driver = {
1192 .owner = THIS_MODULE,
1193 .channel_types = visorhba_channel_types,
1194 .probe = visorhba_probe,
1195 .remove = visorhba_remove,
1196 .pause = visorhba_pause,
1197 .resume = visorhba_resume,
/* NULL: this driver polls the channel instead of taking interrupts
 * (VISOR_CHANNEL_IS_POLLING is set in visorhba_probe()).
 */
1198 .channel_interrupt = NULL,
1202 * visorhba_init - Driver init routine
1204 * Initialize the visorhba driver and register it with visorbus
1205 * to handle s-Par virtual host bus adapter.
1207 * Return: 0 on success, error code otherwise
1209 static int visorhba_init(void)
/* Module-level debugfs root; per-device dirs are created under it */
1213 visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1214 if (!visorhba_debugfs_dir)
1217 rc = visorbus_register_visor_driver(&visorhba_driver);
1219 goto cleanup_debugfs;
1224 debugfs_remove_recursive(visorhba_debugfs_dir);
1230 * visorhba_exit - Driver exit routine
1232 * Unregister driver from the bus and free up memory.
1234 static void visorhba_exit(void)
/* Reverse of visorhba_init(): unregister first, then drop debugfs */
1236 visorbus_unregister_visor_driver(&visorhba_driver);
1237 debugfs_remove_recursive(visorhba_debugfs_dir);
1240 module_init(visorhba_init);
1241 module_exit(visorhba_exit);
1243 MODULE_AUTHOR("Unisys");
1244 MODULE_LICENSE("GPL");
1245 MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");