/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include <trace/events/libata.h>

#include "libata.h"
enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
	ATA_EFLAG_OLD_ER		= (1 << 31),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		= 5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		= 5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};
/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and those outlier devices that
 * take an exceptionally long time to recover from reset.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for outlier devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};
static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,  /* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_revalidate_timeouts[] = {
	15000,	/* Some drives are slow to read log pages when waking-up */
	15000,  /* combined time till here is enough even for media access */
	ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,  /* ditto */
	30000,	/* and even more generous */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};
struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};
/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT),
	  .timeouts = ata_eh_revalidate_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS
static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */
static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}
/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
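/*
 * Illustrative sketch (not part of the original file): an LLDD error
 * interrupt typically builds up the description and error info under
 * the host lock before freezing or aborting the port.  "irq_stat" is
 * a hypothetical controller status word used only for illustration:
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *	ata_ehi_clear_desc(ehi);
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ehi->err_mask |= AC_ERR_HOST_BUS;
 *	ehi->action |= ATA_EH_RESET;
 *	ata_port_freeze(ap);
 */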
/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}
/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}
#ifdef CONFIG_PCI
/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only name and offsetted address is appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}
#endif /* CONFIG_PCI */
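/*
 * Illustrative sketch (not part of the original file): a PCI LLDD
 * init path might describe its resources like this, so that host
 * registration prints something like "abar m2048@0xf0406000 port
 * 0xf0406100 irq 16".  "MY_PCI_BAR" and "port_offset" are
 * hypothetical names used only for this example:
 *
 *	ata_port_pbar_desc(ap, MY_PCI_BAR, -1, "abar");
 *	ata_port_pbar_desc(ap, MY_PCI_BAR, port_offset, "port");
 *	ata_port_desc(ap, "irq %d", pdev->irq);
 */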
static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}
/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}
/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}
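/*
 * Illustrative sketch (not part of the original file) of how the two
 * helpers above cooperate: an EH-context caller picks the timeout for
 * the current attempt and, on AC_ERR_TIMEOUT, bumps the per-class
 * index so the next attempt uses the next (longer) table entry.  This
 * is essentially what the internal command machinery does when it is
 * handed a zero timeout:
 *
 *	unsigned long timeout = ata_internal_cmd_timeout(dev, tf.command);
 *	unsigned int err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE,
 *						  NULL, 0, timeout);
 *
 *	if (err_mask & AC_ERR_TIMEOUT)
 *		ata_internal_cmd_timed_out(dev, tf.command);
 */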
static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}
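/*
 * Illustrative sketch (not part of the original file): ata_ering_map()
 * walks entries from the most recent backwards and stops when the
 * callback returns non-zero.  A hypothetical counter of timeouts still
 * considered current (i.e. not marked ATA_EFLAG_OLD_ER) could look
 * like this; speed_down_verdict_cb() below is the real in-tree user of
 * the same pattern:
 *
 *	static int count_timeout_cb(struct ata_ering_entry *ent, void *arg)
 *	{
 *		int *nr = arg;
 *
 *		if (ent->eflags & ATA_EFLAG_OLD_ER)
 *			return -1;	\/\* stop walking \*\/
 *		if (ent->err_mask & AC_ERR_TIMEOUT)
 *			(*nr)++;
 *		return 0;		\/\* keep walking \*\/
 *	}
 */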
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}
static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}
/**
 *	ata_eh_acquire - acquire EH ownership
 *	@ap: ATA port to acquire EH ownership for
 *
 *	Acquire EH ownership for @ap.  This is the basic exclusion
 *	mechanism for ports sharing a host.  Only one port hanging off
 *	the same host can claim the ownership of EH.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
	mutex_lock(&ap->host->eh_mutex);
	WARN_ON_ONCE(ap->host->eh_owner);
	ap->host->eh_owner = current;
}
/**
 *	ata_eh_release - release EH ownership
 *	@ap: ATA port to release EH ownership for
 *
 *	Release EH ownership for @ap.  The caller must have acquired
 *	EH ownership using ata_eh_acquire() previously.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
	WARN_ON_ONCE(ap->host->eh_owner != current);
	ap->host->eh_owner = NULL;
	mutex_unlock(&ap->host->eh_mutex);
}
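/*
 * Illustrative sketch (not part of the original file): long sleeps in
 * EH context drop and re-take ownership so that sibling ports on the
 * same host can make progress in the meantime.  This is the pattern
 * used by helpers such as ata_msleep():
 *
 *	ata_eh_release(ap);
 *	msleep(msecs);
 *	ata_eh_acquire(ap);
 */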
/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	BLK_EH_HANDLED or BLK_EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}
static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	DPRINTK("ENTER\n");

	spin_lock_irqsave(host->host_lock, flags);
	list_splice_init(&host->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(host->host_lock, flags);

	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

	/* If we timed out but raced normal completion, there is nothing
	   to recover (nr_timedout == 0), so why exactly are we doing
	   error recovery? */
	ata_scsi_port_error_handler(host, ap);

	/* finish or retry handled scmd's and clean up */
	WARN_ON(!list_empty(&eh_work_q));

	DPRINTK("EXIT\n");
}
/**
 * ata_scsi_cmd_error_handler - error callback for a list of commands
 * @host:	scsi host containing the port
 * @ap:		ATA port within the host
 * @eh_work_q:	list of commands to process
 *
 * Process the given list of commands and return those finished to the
 * ap->eh_done_q.  This function is the first part of the libata error
 * handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
				struct list_head *eh_work_q)
{
	int i;
	unsigned long flags;

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point on but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
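/*
 * Illustrative sketch (not part of the original file): the two halves
 * of the libata error handler are exported separately so that a
 * transport such as libsas can interleave its own recovery between
 * them, roughly:
 *
 *	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
 *	ata_scsi_cmd_error_handler(shost, ap, &eh_work_q);
 *	\/\* ... transport-specific recovery of the work queue ... \*\/
 *	ata_scsi_port_error_handler(shost, ap);
 *
 * ata_scsi_error() above is simply the default composition of the two.
 */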
/**
 * ata_scsi_port_error_handler - recover the port after the commands
 * @host:	SCSI host containing the port
 * @ap:		the ATA port
 *
 * Handle the recovery of the port @ap after all the commands
 * have been recovered.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
	unsigned long flags;

	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* acquire EH ownership */
		ata_eh_acquire(ap);
 repeat:
		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_err(ap,
				     "EH pending after %d tries, giving up\n",
				     ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* end eh (clear host_eh_scheduled) while holding
		 * ap->lock such that if exception occurs after this
		 * point but before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		ap->ops->end_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);
		ata_eh_release(ap);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_info(ap, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);
static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}
/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/**
 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
 * @ap: ATA port to schedule EH for
 *
 *	LOCKING: inherited from ata_port_schedule_eh
 *	spin_lock_irqsave(host lock)
 */
void ata_std_sched_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);

/**
 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
 * @ap: ATA port to end EH for
 *
 * In the libata object model there is a 1:1 mapping of ata_port to
 * shost, so host fields can be directly manipulated under ap->lock, in
 * the libsas case we need to hold a lock at the ha->level to coordinate
 * these events.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_std_end_eh(struct ata_port *ap)
{
	struct Scsi_Host *host = ap->scsi_host;

	host->host_eh_scheduled = 0;
}
EXPORT_SYMBOL(ata_std_end_eh);
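/*
 * Illustrative sketch (not part of the original file): sched_eh/end_eh
 * are port operations so that transports can override them.  Plain ATA
 * drivers inherit the standard pair through the base port ops, which
 * is equivalent to wiring:
 *
 *	.sched_eh	= ata_std_sched_eh,
 *	.end_eh		= ata_std_end_eh,
 *
 * while libsas substitutes implementations that coordinate with the
 * SAS host lock instead.
 */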
/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	/* see: ata_std_sched_eh, unless you know better */
	ap->ops->sched_eh(ap);
}
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}
/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all qc's active on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}
/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}
/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}
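/*
 * Illustrative sketch (not part of the original file): an LLDD error
 * interrupt typically chooses between the two entry points above
 * depending on severity.  "fatal" is a hypothetical flag derived from
 * controller status, used only for illustration:
 *
 *	if (fatal)
 *		ata_port_freeze(ap);	\/\* unknown HW state: freeze + abort \*\/
 *	else
 *		ata_port_abort(ap);	\/\* HW still sane: just abort qcs \*\/
 */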
/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification is received
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}
/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}
static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 * ata_eh_qc_complete - Complete an active ATA command from EH
 * @qc: Command to complete
 *
 * Indicate to the mid and upper layers that an ATA command has
 * completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}
/**
 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 * @qc: Command to retry
 *
 * Indicate to the mid and upper layers that an ATA command
 * should be retried.  To be used from EH.
 *
 * SCSI midlayer limits the number of retries to scmd->allowed.
 * scmd->allowed is incremented for commands which get retried
 * due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask)
		scmd->allowed++;
	__ata_eh_qc_complete(qc);
}
/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	Locking:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_warn(dev, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}
/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_eh_done - EH action complete
 *	@link: ATA link for which EH actions are complete
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @link->eh_context.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	ata_eh_clear_action(link, dev, &ehc->i, action);
}
/**
 *	ata_err_string - convert err_mask to descriptive string
 *	@err_mask: error mask to convert to string
 *
 *	Convert @err_mask to descriptive string.  Errors are
 *	prioritized according to severity and only the most severe
 *	error is reported.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	return "unknown error";
}
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@log: log to read
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
			       u8 page, void *buf, unsigned int sectors)
{
	unsigned long ap_flags = dev->link->ap->flags;
	struct ata_taskfile tf;
	unsigned int err_mask;
	bool dma = false;

	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);

	/*
	 * Return error without actually issuing the command on controllers
	 * which e.g. lockup on a read log page.
	 */
	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
		return AC_ERR_DEV;

retry:
	ata_tf_init(dev, &tf);
	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
	    !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
		tf.protocol = ATA_PROT_DMA;
		dma = true;
	} else {
		tf.command = ATA_CMD_READ_LOG_EXT;
		tf.protocol = ATA_PROT_PIO;
		dma = false;
	}
	tf.lbal = log;
	tf.lbam = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	if (err_mask && dma) {
		dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
		goto retry;
	}

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
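/*
 * Illustrative sketch (not part of the original file): callers read a
 * single 512-byte page into the port-wide scratch buffer and check
 * the returned error mask; ata_eh_read_log_10h() below is a concrete
 * in-tree user of this pattern:
 *
 *	u8 *buf = dev->link->ap->sector_buf;
 *	unsigned int err_mask;
 *
 *	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
 *	if (err_mask)
 *		return -EIO;
 */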
/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: Device to read log page 10h from
 *	@tag: Resulting tag of the failed command
 *	@tf: Resulting taskfile registers of the failed command
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
	if (err_mask)
		return -EIO;

	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
			     csum);

	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];
	if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];

	return 0;
}
/**
 *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *	@dev: target ATAPI device
 *	@r_sense_key: out parameter for sense_key
 *
 *	Perform ATAPI TEST_UNIT_READY.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}
/**
 *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
 *	@qc: qc to perform REQUEST_SENSE_DATA_EXT for
 *	@cmd: scsi command for which the sense code should be set
 *
 *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_request_sense(struct ata_queued_cmd *qc,
				 struct scsi_cmnd *cmd)
{
	struct ata_device *dev = qc->dev;
	struct ata_taskfile tf;
	unsigned int err_mask;

	if (qc->ap->pflags & ATA_PFLAG_FROZEN) {
		ata_dev_warn(dev, "sense data available but port frozen\n");
		return;
	}

	if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID)
		return;

	if (!ata_id_sense_reporting_enabled(dev->id)) {
		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
		return;
	}

	DPRINTK("ATA request sense\n");

	ata_tf_init(dev, &tf);
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	tf.command = ATA_CMD_REQ_SENSE_DATA;
	tf.protocol = ATA_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* Ignore err_mask; ATA_ERR might be set */
	if (tf.command & ATA_SENSE) {
		ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal);
		qc->flags |= ATA_QCFLAG_SENSE_VALID;
	} else {
		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
			     tf.command, err_mask);
	}
}
/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	@dfl_sense_key: default sense key to use
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
unsigned int atapi_eh_request_sense(struct ata_device *dev,
				    u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}
/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@link: ATA link to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	if (link->lpm_policy > ATA_LPM_MAX_POWER)
		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}
/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@link: ATA link to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all LLDDs have to do
 *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
			     rc);
		return;
	}

	if (!(link->sactive & (1 << tag))) {
		ata_link_err(link, "log page 10h reported inactive tag %d\n",
			     tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	if (dev->class == ATA_DEV_ZAC &&
	    ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
		char sense_key, asc, ascq;

		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
		ascq = qc->result_tf.auxiliary & 0xff;
		ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq);
		ata_scsi_set_sense_information(dev, qc->scsicmd,
					       &qc->result_tf);
		qc->flags |= ATA_QCFLAG_SENSE_VALID;
	}

	ehc->i.err_mask &= ~AC_ERR_DEV;
}
/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF)) {
		qc->err_mask |= AC_ERR_DEV;
		/*
		 * Sense data reporting does not work if the
		 * device fault bit is set.
		 */
		if (stat & ATA_DF)
			stat &= ~ATA_SENSE;
	} else {
		return 0;
	}

	switch (qc->dev->class) {
	case ATA_DEV_ZAC:
		if (stat & ATA_SENSE)
			ata_eh_request_sense(qc, qc->scsicmd);
		/* fall through */
	case ATA_DEV_ATA:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & (ATA_UNC | ATA_AMNF))
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp)
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			else
				qc->err_mask |= tmp;
		}
	}

	if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
		int ret = scsi_check_sense(qc->scsicmd);
		/*
		 * SUCCESS here means that the sense code could be
		 * evaluated and should be passed to the upper layers
		 * for correct evaluation.
		 * FAILED means the sense code could not be interpreted
		 * and the device would need to be reset.
		 * NEEDS_RETRY and ADD_TO_MLQUEUE means that the
		 * command would need to be retried.
		 */
		if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
			qc->flags |= ATA_QCFLAG_RETRY;
			qc->err_mask |= AC_ERR_OTHER;
		} else if (ret != SUCCESS) {
			qc->err_mask |= AC_ERR_HSM;
		}
	}
	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}
static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}
struct speed_down_verdict_arg {
	u64 since;
	int xfer_ok;
	int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}
/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary.
 *
 *	ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
 *			  IO commands
 *
 *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
 *			  data transfer hasn't been verified.
 *
 *	Verdicts are
 *
 *	NCQ_OFF		: Turn off NCQ.
 *
 *	SPEED_DOWN	: Speed down transfer speed but don't fall back
 *			  to PIO.
 *
 *	FALLBACK_TO_PIO	: Fall back to PIO.
 *
 *	Even if multiple verdicts are returned, only one action is
 *	taken per error.  An action triggered by non-DUBIOUS errors
 *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
 *	This is to expedite speed down decisions right after device is
 *	initially configured.
 *
 *	The following are speed down rules.  #1 and #2 deal with
 *	DUBIOUS errors.
 *
 *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *	   occurred during last 5 mins, NCQ_OFF.
 *
 *	3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *	   occurred during last 5 mins, FALLBACK_TO_PIO.
 *
 *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *	   during last 10 mins, NCQ_OFF.
 *
 *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}
/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: Failed device
 *	@eflags: mask of ATA_EFLAG_* flags
 *	@err_mask: err_mask of the error
 *
 *	Record error and examine error history to determine whether
 *	adjusting transmission speed is necessary.  It also sets
 *	transmission limits appropriately if such adjustment is
 *	necessary.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}
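/*
 * Worked example (illustrative only, not part of the original file):
 * suppose an NCQ-enabled disk logs two DUBIOUS_UNK_DEV errors within
 * five minutes right after being configured.  Rule #2 above fires, so
 * ata_eh_speed_down_verdict() returns NCQ_OFF | KEEP_ERRORS.
 * ata_eh_speed_down() then sets ATA_DFLAG_NCQ_OFF, and because
 * KEEP_ERRORS is set the ering is preserved, so a further error can
 * still escalate quickly to SPEED_DOWN or FALLBACK_TO_PIO.
 */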
/**
 *	ata_eh_worth_retry - analyze error and decide whether to retry
 *	@qc: qc to possibly retry
 *
 *	Look at the cause of the error and decide if a retry
 *	might be useful or not.  We don't want to retry media errors
 *	because the drive itself has probably already taken 10-30 seconds
 *	doing its own internal retries before reporting the failure.
 */
static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
{
	if (qc->err_mask & AC_ERR_MEDIA)
		return 0;	/* don't retry media errors */
	if (qc->flags & ATA_QCFLAG_IO)
		return 1;	/* otherwise retry anything from fs stack */
	if (qc->err_mask & AC_ERR_INVALID)
		return 0;	/* don't retry these */
	return qc->err_mask != AC_ERR_DEV;	/* retry if not dev error */
}
/**
 *	ata_eh_link_autopsy - analyze error and determine recovery action
 *	@link: host link to perform autopsy on
 *
 *	Analyze why @link failed and determine which recovery actions
 *	are needed.  This function also sets more detailed AC_ERR_*
 *	values and fills sense data for ATAPI CHECK SENSE.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/*
		 * SENSE_VALID trumps dev/unknown error and revalidation. Upper
		 * layers will determine whether the command is worth retrying
		 * based on the sense data and device class/type. Otherwise,
		 * determine directly if the command is worth retrying using its
		 * error mask and flags.
		 */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
		else if (ata_eh_worth_retry(qc))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
		trace_ata_eh_link_autopsy_qc(qc);
	}

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* record error and consider speeding down */
	dev = ehc->i.dev;
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
		trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
	}
	DPRINTK("EXIT\n");
}
2352 * ata_eh_autopsy - analyze error and determine recovery action
2353 * @ap: host port to perform autopsy on
2355 * Analyze all links of @ap and determine why they failed and
2356 * which recovery actions are needed.
2359 * Kernel thread context (may sleep).
2361 void ata_eh_autopsy(struct ata_port *ap)
2363 struct ata_link *link;
2365 ata_for_each_link(link, ap, EDGE)
2366 ata_eh_link_autopsy(link);
2368 /* Handle the frigging slave link. Autopsy is done similarly
2369 * but actions and flags are transferred over to the master
2370 * link and handled from there.
2372 if (ap->slave_link) {
2373 struct ata_eh_context *mehc = &ap->link.eh_context;
2374 struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2376 /* transfer control flags from master to slave */
2377 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2379 /* perform autopsy on the slave link */
2380 ata_eh_link_autopsy(ap->slave_link);
2382 /* transfer actions from slave to master and clear slave */
2383 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2384 mehc->i.action |= sehc->i.action;
2385 mehc->i.dev_action[1] |= sehc->i.dev_action[1];
2386 mehc->i.flags |= sehc->i.flags;
2387 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2390 /* Autopsy of fanout ports can affect host link autopsy.
2391 * Perform host link autopsy last.
2393 if (sata_pmp_attached(ap))
2394 ata_eh_link_autopsy(&ap->link);
2398 * ata_get_cmd_descript - get description for ATA command
2399 * @command: ATA command code to get description for
2401 * Return a textual description of the given command, or NULL if the
2402 * command is not known.
2407 const char *ata_get_cmd_descript(u8 command)
2409 #ifdef CONFIG_ATA_VERBOSE_ERROR
2415 { ATA_CMD_DEV_RESET, "DEVICE RESET" },
2416 { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
2417 { ATA_CMD_STANDBY, "STANDBY" },
2418 { ATA_CMD_IDLE, "IDLE" },
2419 { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
2420 { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
2421 { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" },
2422 { ATA_CMD_NOP, "NOP" },
2423 { ATA_CMD_FLUSH, "FLUSH CACHE" },
2424 { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
2425 { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
2426 { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
2427 { ATA_CMD_SERVICE, "SERVICE" },
2428 { ATA_CMD_READ, "READ DMA" },
2429 { ATA_CMD_READ_EXT, "READ DMA EXT" },
2430 { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
2431 { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
2432 { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
2433 { ATA_CMD_WRITE, "WRITE DMA" },
2434 { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
2435 { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
2436 { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
2437 { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2438 { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
2439 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2440 { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
2441 { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
2442 { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
2443 { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
2444 { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
2445 { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
2446 { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
2447 { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
2448 { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
2449 { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
2450 { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
2451 { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
2452 { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
2453 { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
2454 { ATA_CMD_SET_FEATURES, "SET FEATURES" },
2455 { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
2456 { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
2457 { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
2458 { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
2459 { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
2460 { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
2461 { ATA_CMD_SLEEP, "SLEEP" },
2462 { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
2463 { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
2464 { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
2465 { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
2466 { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
2467 { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
2468 { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
2469 { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
2470 { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
2471 { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" },
2472 { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
2473 { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
2474 { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
2475 { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
2476 { ATA_CMD_PMP_READ, "READ BUFFER" },
2477 { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" },
2478 { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
2479 { ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" },
2480 { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
2481 { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
2482 { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
2483 { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
2484 { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
2485 { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
2486 { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
2487 { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
2488 { ATA_CMD_SMART, "SMART" },
2489 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
2490 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
2491 { ATA_CMD_DSM, "DATA SET MANAGEMENT" },
2492 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
2493 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
2494 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
2495 { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
2496 { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
2497 { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
2498 { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" },
2499 { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" },
2500 { ATA_CMD_ZAC_MGMT_IN, "ZAC MANAGEMENT IN" },
2501 { ATA_CMD_ZAC_MGMT_OUT, "ZAC MANAGEMENT OUT" },
2502 { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
2503 { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
2504 { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
2505 { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
2506 { ATA_CMD_RESTORE, "RECALIBRATE" },
2507 { 0, NULL } /* terminate list */
2511 for (i = 0; cmd_descr[i].text; i++)
2512 if (cmd_descr[i].command == command)
2513 return cmd_descr[i].text;
2518 EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
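/*
 * Usage sketch (illustrative only; pr_debug() stands in for a real
 * caller): resolving an opcode to the name printed by the EH report
 * below:
 *
 *	const char *descr = ata_get_cmd_descript(ATA_CMD_FLUSH);
 *	if (descr)
 *		pr_debug("failed command: %s\n", descr); // "FLUSH CACHE"
 *
 * Note that NULL is returned both for unknown opcodes and when
 * CONFIG_ATA_VERBOSE_ERROR is not set.
 */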
2521 * ata_eh_link_report - report error handling to user
2522 * @link: ATA link EH is going on
2524 * Report EH to user.
2529 static void ata_eh_link_report(struct ata_link *link)
2531 struct ata_port *ap = link->ap;
2532 struct ata_eh_context *ehc = &link->eh_context;
2533 const char *frozen, *desc;
2534 char tries_buf[6] = "";
2535 int tag, nr_failed = 0;
2537 if (ehc->i.flags & ATA_EHI_QUIET)
2541 if (ehc->i.desc[0] != '\0')
2544 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2545 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2547 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2548 ata_dev_phys_link(qc->dev) != link ||
2549 ((qc->flags & ATA_QCFLAG_QUIET) &&
2550 qc->err_mask == AC_ERR_DEV))
2552 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2558 if (!nr_failed && !ehc->i.err_mask)
2562 if (ap->pflags & ATA_PFLAG_FROZEN)
2565 if (ap->eh_tries < ATA_EH_MAX_TRIES)
2566 snprintf(tries_buf, sizeof(tries_buf), " t%d",
2570 ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
2571 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2572 ehc->i.err_mask, link->sactive, ehc->i.serror,
2573 ehc->i.action, frozen, tries_buf);
2575 ata_dev_err(ehc->i.dev, "%s\n", desc);
2577 ata_link_err(link, "exception Emask 0x%x "
2578 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2579 ehc->i.err_mask, link->sactive, ehc->i.serror,
2580 ehc->i.action, frozen, tries_buf);
2582 ata_link_err(link, "%s\n", desc);
2585 #ifdef CONFIG_ATA_VERBOSE_ERROR
2588 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2589 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2590 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2591 ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2592 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2593 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2594 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2595 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2596 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2597 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2598 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2599 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2600 ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2601 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2602 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2603 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2604 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2605 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2608 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2609 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2610 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2611 char data_buf[20] = "";
2612 char cdb_buf[70] = "";
2614 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2615 ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2618 if (qc->dma_dir != DMA_NONE) {
2619 static const char *dma_str[] = {
2620 [DMA_BIDIRECTIONAL] = "bidi",
2621 [DMA_TO_DEVICE] = "out",
2622 [DMA_FROM_DEVICE] = "in",
2624 static const char *prot_str[] = {
2625 [ATA_PROT_UNKNOWN] = "unknown",
2626 [ATA_PROT_NODATA] = "nodata",
2627 [ATA_PROT_PIO] = "pio",
2628 [ATA_PROT_DMA] = "dma",
2629 [ATA_PROT_NCQ] = "ncq dma",
2630 [ATA_PROT_NCQ_NODATA] = "ncq nodata",
2631 [ATAPI_PROT_NODATA] = "nodata",
2632 [ATAPI_PROT_PIO] = "pio",
2633 [ATAPI_PROT_DMA] = "dma",
2636 snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2637 prot_str[qc->tf.protocol], qc->nbytes,
2638 dma_str[qc->dma_dir]);
2641 if (ata_is_atapi(qc->tf.protocol)) {
2642 const u8 *cdb = qc->cdb;
2643 size_t cdb_len = qc->dev->cdb_len;
2646 cdb = qc->scsicmd->cmnd;
2647 cdb_len = qc->scsicmd->cmd_len;
2649 __scsi_format_command(cdb_buf, sizeof(cdb_buf),
2652 const char *descr = ata_get_cmd_descript(cmd->command);
2654 ata_dev_err(qc->dev, "failed command: %s\n",
2658 ata_dev_err(qc->dev,
2659 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2661 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2662 "Emask 0x%x (%s)%s\n",
2663 cmd->command, cmd->feature, cmd->nsect,
2664 cmd->lbal, cmd->lbam, cmd->lbah,
2665 cmd->hob_feature, cmd->hob_nsect,
2666 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2667 cmd->device, qc->tag, data_buf, cdb_buf,
2668 res->command, res->feature, res->nsect,
2669 res->lbal, res->lbam, res->lbah,
2670 res->hob_feature, res->hob_nsect,
2671 res->hob_lbal, res->hob_lbam, res->hob_lbah,
2672 res->device, qc->err_mask, ata_err_string(qc->err_mask),
2673 qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2675 #ifdef CONFIG_ATA_VERBOSE_ERROR
2676 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2677 ATA_SENSE | ATA_ERR)) {
2678 if (res->command & ATA_BUSY)
2679 ata_dev_err(qc->dev, "status: { Busy }\n");
2681 ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
2682 res->command & ATA_DRDY ? "DRDY " : "",
2683 res->command & ATA_DF ? "DF " : "",
2684 res->command & ATA_DRQ ? "DRQ " : "",
2685 res->command & ATA_SENSE ? "SENSE " : "",
2686 res->command & ATA_ERR ? "ERR " : "");
2689 if (cmd->command != ATA_CMD_PACKET &&
2690 (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
2691 ATA_IDNF | ATA_ABORTED)))
2692 ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
2693 res->feature & ATA_ICRC ? "ICRC " : "",
2694 res->feature & ATA_UNC ? "UNC " : "",
2695 res->feature & ATA_AMNF ? "AMNF " : "",
2696 res->feature & ATA_IDNF ? "IDNF " : "",
2697 res->feature & ATA_ABORTED ? "ABRT " : "");
2703 * ata_eh_report - report error handling to user
2704 * @ap: ATA port to report EH about
2706 * Report EH to user.
2711 void ata_eh_report(struct ata_port *ap)
2713 struct ata_link *link;
2715 ata_for_each_link(link, ap, HOST_FIRST)
2716 ata_eh_link_report(link);
2719 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2720 unsigned int *classes, unsigned long deadline,
2723 struct ata_device *dev;
2726 ata_for_each_dev(dev, link, ALL)
2727 classes[dev->devno] = ATA_DEV_UNKNOWN;
2729 return reset(link, classes, deadline);
2732 static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
2734 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2738 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2743 int ata_eh_reset(struct ata_link *link, int classify,
2744 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2745 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2747 struct ata_port *ap = link->ap;
2748 struct ata_link *slave = ap->slave_link;
2749 struct ata_eh_context *ehc = &link->eh_context;
2750 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2751 unsigned int *classes = ehc->classes;
2752 unsigned int lflags = link->flags;
2753 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2754 int max_tries = 0, try = 0;
2755 struct ata_link *failed_link;
2756 struct ata_device *dev;
2757 unsigned long deadline, now;
2758 ata_reset_fn_t reset;
2759 unsigned long flags;
2766 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2768 if (link->flags & ATA_LFLAG_RST_ONCE)
2770 if (link->flags & ATA_LFLAG_NO_HRST)
2772 if (link->flags & ATA_LFLAG_NO_SRST)
2775 /* make sure each reset attempt is at least COOL_DOWN apart */
2776 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2778 WARN_ON(time_after(ehc->last_reset, now));
2779 deadline = ata_deadline(ehc->last_reset,
2780 ATA_EH_RESET_COOL_DOWN);
2781 if (time_before(now, deadline))
2782 schedule_timeout_uninterruptible(deadline - now);
2785 spin_lock_irqsave(ap->lock, flags);
2786 ap->pflags |= ATA_PFLAG_RESETTING;
2787 spin_unlock_irqrestore(ap->lock, flags);
2789 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2791 ata_for_each_dev(dev, link, ALL) {
2792 /* If we issue an SRST then an ATA drive (not ATAPI)
2793 * may change configuration and be in PIO0 timing. If
2794 * we do a hard reset (or are coming from power on)
2795 * this is true for ATA or ATAPI. Until we've set a
2796 * suitable controller mode we should not touch the
2797 * bus as we may be talking too fast.
2799 dev->pio_mode = XFER_PIO_0;
2800 dev->dma_mode = 0xff;
2802 /* If the controller has a pio mode setup function
2803 * then use it to set the chipset to rights. Don't
2804 * touch the DMA setup as that will be dealt with when
2805 * configuring devices.
2807 if (ap->ops->set_piomode)
2808 ap->ops->set_piomode(ap, dev);
2811 /* prefer hardreset */
2813 ehc->i.action &= ~ATA_EH_RESET;
2816 ehc->i.action |= ATA_EH_HARDRESET;
2817 } else if (softreset) {
2819 ehc->i.action |= ATA_EH_SOFTRESET;
2823 unsigned long deadline = ata_deadline(jiffies,
2824 ATA_EH_PRERESET_TIMEOUT);
2827 sehc->i.action &= ~ATA_EH_RESET;
2828 sehc->i.action |= ehc->i.action;
2831 rc = prereset(link, deadline);
2833 /* If present, do prereset on slave link too. Reset
2834 * is skipped iff both master and slave links report
2835 * -ENOENT or clear ATA_EH_RESET.
2837 if (slave && (rc == 0 || rc == -ENOENT)) {
2840 tmp = prereset(slave, deadline);
2844 ehc->i.action |= sehc->i.action;
2848 if (rc == -ENOENT) {
2849 ata_link_dbg(link, "port disabled--ignoring\n");
2850 ehc->i.action &= ~ATA_EH_RESET;
2852 ata_for_each_dev(dev, link, ALL)
2853 classes[dev->devno] = ATA_DEV_NONE;
2858 "prereset failed (errno=%d)\n",
2863 /* prereset() might have cleared ATA_EH_RESET. If so,
2864 * bang classes, thaw and return.
2866 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2867 ata_for_each_dev(dev, link, ALL)
2868 classes[dev->devno] = ATA_DEV_NONE;
2869 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2870 ata_is_host_link(link))
2871 ata_eh_thaw_port(ap);
2881 if (ata_is_host_link(link))
2882 ata_eh_freeze_port(ap);
2884 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2888 ata_link_info(link, "%s resetting link\n",
2889 reset == softreset ? "soft" : "hard");
2891 /* mark that this EH session started with reset */
2892 ehc->last_reset = jiffies;
2893 if (reset == hardreset)
2894 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2896 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2898 rc = ata_do_reset(link, reset, classes, deadline, true);
2899 if (rc && rc != -EAGAIN) {
2904 /* hardreset slave link if existent */
2905 if (slave && reset == hardreset) {
2909 ata_link_info(slave, "hard resetting link\n");
2911 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2912 tmp = ata_do_reset(slave, reset, classes, deadline,
2920 failed_link = slave;
2926 /* perform follow-up SRST if necessary */
2927 if (reset == hardreset &&
2928 ata_eh_followup_srst_needed(link, rc)) {
2933 "follow-up softreset required but no softreset available\n");
2939 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2940 rc = ata_do_reset(link, reset, classes, deadline, true);
2949 "no reset method available, skipping reset\n");
2950 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2951 lflags |= ATA_LFLAG_ASSUME_ATA;
2955 * Post-reset processing
2957 ata_for_each_dev(dev, link, ALL) {
2958 /* After the reset, the device state is PIO 0 and the
2959 * controller state is undefined. Reset also wakes up
2960 * drives from sleeping mode.
2962 dev->pio_mode = XFER_PIO_0;
2963 dev->flags &= ~ATA_DFLAG_SLEEPING;
2965 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2968 /* apply class override */
2969 if (lflags & ATA_LFLAG_ASSUME_ATA)
2970 classes[dev->devno] = ATA_DEV_ATA;
2971 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2972 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2975 /* record current link speed */
2976 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2977 link->sata_spd = (sstatus >> 4) & 0xf;
2978 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2979 slave->sata_spd = (sstatus >> 4) & 0xf;
2982 if (ata_is_host_link(link))
2983 ata_eh_thaw_port(ap);
2985 /* postreset() should clear hardware SError. Although SError
2986 * is cleared during link resume, clearing SError here is
2987 * necessary as some PHYs raise hotplug events after SRST.
2988 * This introduces a race condition where hotplug may occur
2989 * between the reset and this point; the race is resolved by
2990 * cross-checking link onlineness and classification result later.
2993 postreset(link, classes);
2995 postreset(slave, classes);
2999 * Some controllers can't be frozen very well and may set spurious
3000 * error conditions during reset. Clear accumulated error
3001 * information and re-thaw the port if frozen. As reset is the
3002 * final recovery action and we cross check link onlineness against
3003 * device classification later, no hotplug event is lost by this.
3005 spin_lock_irqsave(link->ap->lock, flags);
3006 memset(&link->eh_info, 0, sizeof(link->eh_info));
3008 memset(&slave->eh_info, 0, sizeof(slave->eh_info));
3009 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
3010 spin_unlock_irqrestore(link->ap->lock, flags);
3012 if (ap->pflags & ATA_PFLAG_FROZEN)
3013 ata_eh_thaw_port(ap);
3016 * Make sure onlineness and classification result correspond.
3017 * Hotplug could have happened during reset and some
3018 * controllers fail to wait while a drive is spinning up after
3019 * being hotplugged, causing misdetection. By cross-checking
3020 * link on/offlineness and classification result, those
3021 * conditions can be reliably detected and retried.
3024 ata_for_each_dev(dev, link, ALL) {
3025 if (ata_phys_link_online(ata_dev_phys_link(dev))) {
3026 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
3027 ata_dev_dbg(dev, "link online but device misclassified\n");
3028 classes[dev->devno] = ATA_DEV_NONE;
3031 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3032 if (ata_class_enabled(classes[dev->devno]))
3034 "link offline, clearing class %d to NONE\n",
3035 classes[dev->devno]);
3036 classes[dev->devno] = ATA_DEV_NONE;
3037 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
3039 "link status unknown, clearing UNKNOWN to NONE\n");
3040 classes[dev->devno] = ATA_DEV_NONE;
3044 if (classify && nr_unknown) {
3045 if (try < max_tries) {
3047 "link online but %d devices misclassified, retrying\n",
3054 "link online but %d devices misclassified, "
3055 "device detection might fail\n", nr_unknown);
3058 /* reset successful, schedule revalidation */
3059 ata_eh_done(link, NULL, ATA_EH_RESET);
3061 ata_eh_done(slave, NULL, ATA_EH_RESET);
3062 ehc->last_reset = jiffies; /* update to completion time */
3063 ehc->i.action |= ATA_EH_REVALIDATE;
3064 link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */
3068 /* clear hotplug flag */
3069 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
3071 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
3073 spin_lock_irqsave(ap->lock, flags);
3074 ap->pflags &= ~ATA_PFLAG_RESETTING;
3075 spin_unlock_irqrestore(ap->lock, flags);
3080 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
3081 if (!ata_is_host_link(link) &&
3082 sata_scr_read(link, SCR_STATUS, &sstatus))
3085 if (try >= max_tries) {
3087 * Thaw host port even if reset failed, so that the port
3088 * can be retried on the next phy event. This risks
3089 * repeated EH runs but seems to be a better tradeoff than
3090 * shutting down a port after a botched hotplug attempt.
3092 if (ata_is_host_link(link))
3093 ata_eh_thaw_port(ap);
3098 if (time_before(now, deadline)) {
3099 unsigned long delta = deadline - now;
3101 ata_link_warn(failed_link,
3102 "reset failed (errno=%d), retrying in %u secs\n",
3103 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
3107 delta = schedule_timeout_uninterruptible(delta);
3112 * While disks spin up behind a PMP, some controllers fail to send SRST.
3113 * They need to be reset, as well as the PMP, before retrying.
3115 if (rc == -ERESTART) {
3116 if (ata_is_host_link(link))
3117 ata_eh_thaw_port(ap);
3121 if (try == max_tries - 1) {
3122 sata_down_spd_limit(link, 0);
3124 sata_down_spd_limit(slave, 0);
3125 } else if (rc == -EPIPE)
3126 sata_down_spd_limit(failed_link, 0);
3133 static inline void ata_eh_pull_park_action(struct ata_port *ap)
3135 struct ata_link *link;
3136 struct ata_device *dev;
3137 unsigned long flags;
3140 * This function can be thought of as an extended version of
3141 * ata_eh_about_to_do() specially crafted to accommodate the
3142 * requirements of ATA_EH_PARK handling. Since the EH thread
3143 * does not leave the do {} while () loop in ata_eh_recover as
3144 * long as the timeout for a park request to *one* device on
3145 * the port has not expired, and since we still want to pick
3146 * up park requests to other devices on the same port or
3147 * timeout updates for the same device, we have to pull
3148 * ATA_EH_PARK actions from eh_info into eh_context.i
3149 * ourselves at the beginning of each pass over the loop.
3151 * Additionally, all write accesses to &ap->park_req_pending
3152 * through reinit_completion() (see below) or complete_all()
3153 * (see ata_scsi_park_store()) are protected by the host lock.
3154 * As a result we have that park_req_pending.done is zero on
3155 * exit from this function, i.e. when ATA_EH_PARK actions for
3156 * *all* devices on port ap have been pulled into the
3157 * respective eh_context structs. If, and only if,
3158 * park_req_pending.done is non-zero by the time we reach
3159 * wait_for_completion_timeout(), another ATA_EH_PARK action
3160 * has been scheduled for at least one of the devices on port
3161 * ap and we have to cycle over the do {} while () loop in
3162 * ata_eh_recover() again.
3165 spin_lock_irqsave(ap->lock, flags);
3166 reinit_completion(&ap->park_req_pending);
3167 ata_for_each_link(link, ap, EDGE) {
3168 ata_for_each_dev(dev, link, ALL) {
3169 struct ata_eh_info *ehi = &link->eh_info;
3171 link->eh_context.i.dev_action[dev->devno] |=
3172 ehi->dev_action[dev->devno] & ATA_EH_PARK;
3173 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
3176 spin_unlock_irqrestore(ap->lock, flags);
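/*
 * Producer-side sketch (illustrative, condensed from what
 * ata_scsi_park_store() does): the request is recorded and EH woken
 * under the same host lock, which is what makes the
 * park_req_pending.done bookkeeping described above reliable:
 *
 *	spin_lock_irqsave(ap->lock, flags);
 *	dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
 *	ata_port_schedule_eh(ap);
 *	complete_all(&ap->park_req_pending);
 *	spin_unlock_irqrestore(ap->lock, flags);
 */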
3179 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
3181 struct ata_eh_context *ehc = &dev->link->eh_context;
3182 struct ata_taskfile tf;
3183 unsigned int err_mask;
3185 ata_tf_init(dev, &tf);
3187 ehc->unloaded_mask |= 1 << dev->devno;
3188 tf.command = ATA_CMD_IDLEIMMEDIATE;
3194 ehc->unloaded_mask &= ~(1 << dev->devno);
3195 tf.command = ATA_CMD_CHK_POWER;
3198 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
3199 tf.protocol = ATA_PROT_NODATA;
3200 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3201 if (park && (err_mask || tf.lbal != 0xc4)) {
3202 ata_dev_err(dev, "head unload failed!\n");
3203 ehc->unloaded_mask &= ~(1 << dev->devno);
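/*
 * Taskfile note (values per the ATA spec's head-unload variant as
 * used by libata): the unload flavour of IDLE IMMEDIATE is selected
 * by setting the FEATURE field to 0x44 and LBA low/mid/high to
 * 0x4c/0x4e/0x55; the device acknowledges a successful unload with
 * 0xc4 in LBA low, which is exactly what the err_mask/tf.lbal check
 * above tests for.
 */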
3207 static int ata_eh_revalidate_and_attach(struct ata_link *link,
3208 struct ata_device **r_failed_dev)
3210 struct ata_port *ap = link->ap;
3211 struct ata_eh_context *ehc = &link->eh_context;
3212 struct ata_device *dev;
3213 unsigned int new_mask = 0;
3214 unsigned long flags;
3219 /* For PATA drive side cable detection to work, IDENTIFY must
3220 * be done backwards such that PDIAG- is released by the slave
3221 * device before the master device is identified.
3223 ata_for_each_dev(dev, link, ALL_REVERSE) {
3224 unsigned int action = ata_eh_dev_action(dev);
3225 unsigned int readid_flags = 0;
3227 if (ehc->i.flags & ATA_EHI_DID_RESET)
3228 readid_flags |= ATA_READID_POSTRESET;
3230 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
3231 WARN_ON(dev->class == ATA_DEV_PMP);
3233 if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3238 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
3239 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
3244 ata_eh_done(link, dev, ATA_EH_REVALIDATE);
3246 /* Configuration may have changed, reconfigure
3249 ehc->i.flags |= ATA_EHI_SETMODE;
3251 /* schedule the scsi_rescan_device() here */
3252 schedule_work(&(ap->scsi_rescan_task));
3253 } else if (dev->class == ATA_DEV_UNKNOWN &&
3254 ehc->tries[dev->devno] &&
3255 ata_class_enabled(ehc->classes[dev->devno])) {
3256 /* Temporarily set dev->class, it will be
3257 * permanently set once all configurations are
3258 * complete. This is necessary because new
3259 * device configuration is done in two separate passes.
3262 dev->class = ehc->classes[dev->devno];
3264 if (dev->class == ATA_DEV_PMP)
3265 rc = sata_pmp_attach(dev);
3267 rc = ata_dev_read_id(dev, &dev->class,
3268 readid_flags, dev->id);
3270 /* read_id might have changed class, store and reset */
3271 ehc->classes[dev->devno] = dev->class;
3272 dev->class = ATA_DEV_UNKNOWN;
3276 /* clear error info accumulated during probe */
3277 ata_ering_clear(&dev->ering);
3278 new_mask |= 1 << dev->devno;
3281 /* IDENTIFY was issued to non-existent
3282 * device. No need to reset. Just
3283 * thaw and ignore the device.
3285 ata_eh_thaw_port(ap);
3293 /* PDIAG- should have been released, ask cable type if post-reset */
3294 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
3295 if (ap->ops->cable_detect)
3296 ap->cbl = ap->ops->cable_detect(ap);
3300 /* Configure new devices forward so that the user doesn't see
3301 * device detection messages in reverse order.
3303 ata_for_each_dev(dev, link, ALL) {
3304 if (!(new_mask & (1 << dev->devno)))
3307 dev->class = ehc->classes[dev->devno];
3309 if (dev->class == ATA_DEV_PMP)
3312 ehc->i.flags |= ATA_EHI_PRINTINFO;
3313 rc = ata_dev_configure(dev);
3314 ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3316 dev->class = ATA_DEV_UNKNOWN;
3320 spin_lock_irqsave(ap->lock, flags);
3321 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3322 spin_unlock_irqrestore(ap->lock, flags);
3324 /* new device discovered, configure xfermode */
3325 ehc->i.flags |= ATA_EHI_SETMODE;
3331 *r_failed_dev = dev;
3332 DPRINTK("EXIT rc=%d\n", rc);
3337 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3338 * @link: link on which timings will be programmed
3339 * @r_failed_dev: out parameter for failed device
3341 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3342 * ata_set_mode() fails, a pointer to the failing device is
3343 * returned in @r_failed_dev.
3346 * PCI/etc. bus probe sem.
3349 * 0 on success, negative errno otherwise
3351 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3353 struct ata_port *ap = link->ap;
3354 struct ata_device *dev;
3357 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3358 ata_for_each_dev(dev, link, ENABLED) {
3359 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3360 struct ata_ering_entry *ent;
3362 ent = ata_ering_top(&dev->ering);
3364 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3368 /* has private set_mode? */
3369 if (ap->ops->set_mode)
3370 rc = ap->ops->set_mode(link, r_failed_dev);
3372 rc = ata_do_set_mode(link, r_failed_dev);
3374 /* if transfer mode has changed, set DUBIOUS_XFER on device */
3375 ata_for_each_dev(dev, link, ENABLED) {
3376 struct ata_eh_context *ehc = &link->eh_context;
3377 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3378 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3380 if (dev->xfer_mode != saved_xfer_mode ||
3381 ata_ncq_enabled(dev) != saved_ncq)
3382 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3389 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3390 * @dev: ATAPI device to clear UA for
3392 * Resets and other operations can make an ATAPI device raise
3393 * UNIT ATTENTION which causes the next operation to fail. This
3394 * function clears UA.
3397 * EH context (may sleep).
3400 * 0 on success, -errno on failure.
3402 static int atapi_eh_clear_ua(struct ata_device *dev)
3406 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3407 u8 *sense_buffer = dev->link->ap->sector_buf;
3409 unsigned int err_mask;
3411 err_mask = atapi_eh_tur(dev, &sense_key);
3412 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3414 "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3419 if (!err_mask || sense_key != UNIT_ATTENTION)
3422 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3424 ata_dev_warn(dev, "failed to clear "
3425 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3430 ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3437 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3438 * @dev: ATA device which may need FLUSH retry
3440 * If @dev failed FLUSH, it needs to be reported to the upper layer
3441 * immediately as it means that @dev failed to remap and has already
3442 * lost at least a sector, so further FLUSH retries won't make
3443 * any difference to the lost sector. However, if FLUSH failed
3444 * for another reason, for example a transmission error, FLUSH needs
3445 * to be retried.
3447 * This function determines whether FLUSH failure retry is
3448 * necessary and performs it if so.
3451 * 0 if EH can continue, -errno if EH needs to be repeated.
3453 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3455 struct ata_link *link = dev->link;
3456 struct ata_port *ap = link->ap;
3457 struct ata_queued_cmd *qc;
3458 struct ata_taskfile tf;
3459 unsigned int err_mask;
3462 /* did flush fail for this device? */
3463 if (!ata_tag_valid(link->active_tag))
3466 qc = __ata_qc_from_tag(ap, link->active_tag);
3467 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3468 qc->tf.command != ATA_CMD_FLUSH))
3471 /* if the device failed it, it should be reported to upper layers */
3472 if (qc->err_mask & AC_ERR_DEV)
3475 /* flush failed for some other reason, give it another shot */
3476 ata_tf_init(dev, &tf);
3478 tf.command = qc->tf.command;
3479 tf.flags |= ATA_TFLAG_DEVICE;
3480 tf.protocol = ATA_PROT_NODATA;
3482 ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
3483 tf.command, qc->err_mask);
3485 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3488 * FLUSH is complete but there's no way to
3489 * successfully complete a failed command from EH.
3490 * Making sure retry is allowed at least once and
3491 * retrying it should do the trick; whatever was in
3492 * the cache is already on the platter and this won't
3493 * cause an infinite loop.
3495 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3497 ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
3501 /* if device failed it, report it to upper layers */
3502 if (err_mask & AC_ERR_DEV) {
3503 qc->err_mask |= AC_ERR_DEV;
3505 if (!(ap->pflags & ATA_PFLAG_FROZEN))
3513 * ata_eh_set_lpm - configure SATA interface power management
3514 * @link: link to configure power management
3515 * @policy: the link power management policy
3516 * @r_failed_dev: out parameter for failed device
3518 * Enable SATA Interface power management. This will enable
3519 * Device Initiated Power Management (DIPM) for the min_power
3520 * policy, and then call driver-specific callbacks for
3521 * enabling Host Initiated Power Management (HIPM).
3527 * 0 on success, -errno on failure.
3529 static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3530 struct ata_device **r_failed_dev)
3532 struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
3533 struct ata_eh_context *ehc = &link->eh_context;
3534 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3535 enum ata_lpm_policy old_policy = link->lpm_policy;
3536 bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
3537 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3538 unsigned int err_mask;
3541 /* if the link or host doesn't do LPM, noop */
3542 if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
3546 * DIPM is enabled only for MIN_POWER as some devices
3547 * misbehave when the host NACKs transition to SLUMBER. Order
3548 * device and link configurations such that the host always
3549 * allows DIPM requests.
3551 ata_for_each_dev(dev, link, ENABLED) {
3552 bool hipm = ata_id_has_hipm(dev->id);
3553 bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
3555 /* find the first enabled device and the first LPM-enabled device */
3559 if (!lpm_dev && (hipm || dipm))
3562 hints &= ~ATA_LPM_EMPTY;
3564 hints &= ~ATA_LPM_HIPM;
3566 /* disable DIPM before changing link config */
3567 if (policy != ATA_LPM_MIN_POWER && dipm) {
3568 err_mask = ata_dev_set_feature(dev,
3569 SETFEATURES_SATA_DISABLE, SATA_DIPM);
3570 if (err_mask && err_mask != AC_ERR_DEV) {
3572 "failed to disable DIPM, Emask 0x%x\n",
3581 rc = ap->ops->set_lpm(link, policy, hints);
3582 if (!rc && ap->slave_link)
3583 rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
3585 rc = sata_pmp_set_lpm(link, policy, hints);
3588 * Attribute link config failure to the first (LPM) enabled
3589 * device on the link.
3592 if (rc == -EOPNOTSUPP) {
3593 link->flags |= ATA_LFLAG_NO_LPM;
3596 dev = lpm_dev ? lpm_dev : link_dev;
3601 * Low level driver acked the transition. Issue DIPM command
3602 * with the new policy set.
3604 link->lpm_policy = policy;
3605 if (ap && ap->slave_link)
3606 ap->slave_link->lpm_policy = policy;
3608 /* host config updated, enable DIPM if transitioning to MIN_POWER */
3609 ata_for_each_dev(dev, link, ENABLED) {
3610 if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
3611 ata_id_has_dipm(dev->id)) {
3612 err_mask = ata_dev_set_feature(dev,
3613 SETFEATURES_SATA_ENABLE, SATA_DIPM);
3614 if (err_mask && err_mask != AC_ERR_DEV) {
3616 "failed to enable DIPM, Emask 0x%x\n",
3624 link->last_lpm_change = jiffies;
3625 link->flags |= ATA_LFLAG_CHANGED;
3630 /* restore the old policy */
3631 link->lpm_policy = old_policy;
3632 if (ap && ap->slave_link)
3633 ap->slave_link->lpm_policy = old_policy;
3635 /* if no device or only one more chance is left, disable LPM */
3636 if (!dev || ehc->tries[dev->devno] <= 2) {
3637 ata_link_warn(link, "disabling LPM on the link\n");
3638 link->flags |= ATA_LFLAG_NO_LPM;
3641 *r_failed_dev = dev;
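/*
 * Ordering summary (restates the logic above, no new behaviour):
 *
 *	leaving MIN_POWER:	disable DIPM on devices, then ->set_lpm()
 *	entering MIN_POWER:	->set_lpm() first, then enable DIPM
 *
 * Either way the host link configuration is always permissive by the
 * time a device may issue DIPM requests.
 */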
3645 int ata_link_nr_enabled(struct ata_link *link)
3647 struct ata_device *dev;
3650 ata_for_each_dev(dev, link, ENABLED)
3655 static int ata_link_nr_vacant(struct ata_link *link)
3657 struct ata_device *dev;
3660 ata_for_each_dev(dev, link, ALL)
3661 if (dev->class == ATA_DEV_UNKNOWN)
3666 static int ata_eh_skip_recovery(struct ata_link *link)
3668 struct ata_port *ap = link->ap;
3669 struct ata_eh_context *ehc = &link->eh_context;
3670 struct ata_device *dev;
3672 /* skip disabled links */
3673 if (link->flags & ATA_LFLAG_DISABLED)
3676 /* skip if explicitly requested */
3677 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3680 /* thaw frozen port and recover failed devices */
3681 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3684 /* reset at least once if reset is requested */
3685 if ((ehc->i.action & ATA_EH_RESET) &&
3686 !(ehc->i.flags & ATA_EHI_DID_RESET))
3689 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
3690 ata_for_each_dev(dev, link, ALL) {
3691 if (dev->class == ATA_DEV_UNKNOWN &&
3692 ehc->classes[dev->devno] != ATA_DEV_NONE)
3699 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3701 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3702 u64 now = get_jiffies_64();
3703 int *trials = void_arg;
3705 if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
3706 (ent->timestamp < now - min(now, interval)))
3713 static int ata_eh_schedule_probe(struct ata_device *dev)
3715 struct ata_eh_context *ehc = &dev->link->eh_context;
3716 struct ata_link *link = ata_dev_phys_link(dev);
3719 if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3720 (ehc->did_probe_mask & (1 << dev->devno)))
3723 ata_eh_detach_dev(dev);
3725 ehc->did_probe_mask |= (1 << dev->devno);
3726 ehc->i.action |= ATA_EH_RESET;
3727 ehc->saved_xfer_mode[dev->devno] = 0;
3728 ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3730 /* the link may be in deep sleep; wake it up */
3731 if (link->lpm_policy > ATA_LPM_MAX_POWER) {
3732 if (ata_is_host_link(link))
3733 link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
3736 sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
3740 /* Record and count probe trials on the ering. The specific
3741 * error mask used is irrelevant. Because a successful device
3742 * detection clears the ering, this count accumulates only if
3743 * there are consecutive failed probes.
3745 * If the count exceeds ATA_EH_PROBE_TRIALS within the last
3746 * ATA_EH_PROBE_TRIAL_INTERVAL, link speed is forced down to
3747 * 1.5Gbps.
3749 * This is to work around cases where failed link speed
3750 * negotiation results in device misdetection leading to
3751 * infinite DEVXCHG or PHRDY CHG events.
3753 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3754 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3756 if (trials > ATA_EH_PROBE_TRIALS)
3757 sata_down_spd_limit(link, 1);
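/*
 * Worked example (assuming the constants above): with
 * ATA_EH_PROBE_TRIALS == 2 and ATA_EH_PROBE_TRIAL_INTERVAL == 60000,
 * the third consecutive probe failure recorded within one minute
 * makes the trial count exceed the budget and forces the link down to
 * 1.5Gbps, breaking infinite DEVXCHG/PHRDY CHG loops caused by bad
 * speed negotiation.
 */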
3762 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3764 struct ata_eh_context *ehc = &dev->link->eh_context;
3766 /* -EAGAIN from EH routine indicates retry without prejudice.
3767 * The requester is responsible for ensuring forward progress.
3770 ehc->tries[dev->devno]--;
3774 /* device missing or wrong IDENTIFY data, schedule probing */
3775 ehc->i.probe_mask |= (1 << dev->devno);
3777 /* give it just one more chance */
3778 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
3780 if (ehc->tries[dev->devno] == 1) {
3781 /* This is the last chance, better to slow
3782 * down than lose it.
3784 sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3785 if (dev->pio_mode > XFER_PIO_0)
3786 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3790 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3791 /* disable device if it has used up all its chances */
3792 ata_dev_disable(dev);
3794 /* detach if offline */
3795 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3796 ata_eh_detach_dev(dev);
3798 /* schedule probe if necessary */
3799 if (ata_eh_schedule_probe(dev)) {
3800 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3801 memset(ehc->cmd_timeout_idx[dev->devno], 0,
3802 sizeof(ehc->cmd_timeout_idx[dev->devno]));
3807 ehc->i.action |= ATA_EH_RESET;
3813 * ata_eh_recover - recover host port after error
3814 * @ap: host port to recover
3815 * @prereset: prereset method (can be NULL)
3816 * @softreset: softreset method (can be NULL)
3817 * @hardreset: hardreset method (can be NULL)
3818 * @postreset: postreset method (can be NULL)
3819 * @r_failed_link: out parameter for failed link
3821 * This is the alpha and omega, yin and yang, heart and soul of
3822 * libata exception handling. On entry, actions required to
3823 * recover each link and hotplug requests are recorded in the
3824 * link's eh_context. This function executes all the operations
3825 * with appropriate retries and fallbacks to resurrect failed
3826 * devices, detach goners and greet newcomers.
3829 * Kernel thread context (may sleep).
3832 * 0 on success, -errno on failure.
3834 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3835 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3836 ata_postreset_fn_t postreset,
3837 struct ata_link **r_failed_link)
3839 struct ata_link *link;
3840 struct ata_device *dev;
3842 unsigned long flags, deadline;
3846 /* prep for recovery */
3847 ata_for_each_link(link, ap, EDGE) {
3848 struct ata_eh_context *ehc = &link->eh_context;
3850 /* re-enable link? */
3851 if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3852 ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3853 spin_lock_irqsave(ap->lock, flags);
3854 link->flags &= ~ATA_LFLAG_DISABLED;
3855 spin_unlock_irqrestore(ap->lock, flags);
3856 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3859 ata_for_each_dev(dev, link, ALL) {
3860 if (link->flags & ATA_LFLAG_NO_RETRY)
3861 ehc->tries[dev->devno] = 1;
3863 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3865 /* collect port action mask recorded in dev actions */
3866 ehc->i.action |= ehc->i.dev_action[dev->devno] &
3867 ~ATA_EH_PERDEV_MASK;
3868 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3870 /* process hotplug request */
3871 if (dev->flags & ATA_DFLAG_DETACH)
3872 ata_eh_detach_dev(dev);
3874 /* schedule probe if necessary */
3875 if (!ata_dev_enabled(dev))
3876 ata_eh_schedule_probe(dev);
3883 /* if UNLOADING, finish immediately */
3884 if (ap->pflags & ATA_PFLAG_UNLOADING)
3888 ata_for_each_link(link, ap, EDGE) {
3889 struct ata_eh_context *ehc = &link->eh_context;
3891 /* skip EH if possible. */
3892 if (ata_eh_skip_recovery(link))
3895 ata_for_each_dev(dev, link, ALL)
3896 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3900 ata_for_each_link(link, ap, EDGE) {
3901 struct ata_eh_context *ehc = &link->eh_context;
3903 if (!(ehc->i.action & ATA_EH_RESET))
3906 rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3907 prereset, softreset, hardreset, postreset);
3909 ata_link_err(link, "reset failed, giving up\n");
3918 * clears ATA_EH_PARK in eh_info and resets
3919 * ap->park_req_pending
3921 ata_eh_pull_park_action(ap);
3924 ata_for_each_link(link, ap, EDGE) {
3925 ata_for_each_dev(dev, link, ALL) {
3926 struct ata_eh_context *ehc = &link->eh_context;
3929 if (dev->class != ATA_DEV_ATA &&
3930 dev->class != ATA_DEV_ZAC)
3932 if (!(ehc->i.dev_action[dev->devno] &
3935 tmp = dev->unpark_deadline;
3936 if (time_before(deadline, tmp))
3938 else if (time_before_eq(tmp, jiffies))
3940 if (ehc->unloaded_mask & (1 << dev->devno))
3943 ata_eh_park_issue_cmd(dev, 1);
3948 if (time_before_eq(deadline, now))
3952 deadline = wait_for_completion_timeout(&ap->park_req_pending,
3956 ata_for_each_link(link, ap, EDGE) {
3957 ata_for_each_dev(dev, link, ALL) {
3958 if (!(link->eh_context.unloaded_mask &
3962 ata_eh_park_issue_cmd(dev, 0);
3963 ata_eh_done(link, dev, ATA_EH_PARK);
3969 ata_for_each_link(link, ap, PMP_FIRST) {
3970 struct ata_eh_context *ehc = &link->eh_context;
3972 if (sata_pmp_attached(ap) && ata_is_host_link(link))
3975 /* revalidate existing devices and attach new ones */
3976 rc = ata_eh_revalidate_and_attach(link, &dev);
3980 /* if PMP got attached, return; PMP EH will take care of it */
3981 if (link->device->class == ATA_DEV_PMP) {
3986 /* configure transfer mode if necessary */
3987 if (ehc->i.flags & ATA_EHI_SETMODE) {
3988 rc = ata_set_mode(link, &dev);
3991 ehc->i.flags &= ~ATA_EHI_SETMODE;
3994 /* If reset has been issued, clear UA to avoid
3995 * disrupting the current users of the device.
3997 if (ehc->i.flags & ATA_EHI_DID_RESET) {
3998 ata_for_each_dev(dev, link, ALL) {
3999 if (dev->class != ATA_DEV_ATAPI)
4001 rc = atapi_eh_clear_ua(dev);
4004 if (zpodd_dev_enabled(dev))
4005 zpodd_post_poweron(dev);
4009 /* retry flush if necessary */
4010 ata_for_each_dev(dev, link, ALL) {
4011 if (dev->class != ATA_DEV_ATA &&
4012 dev->class != ATA_DEV_ZAC)
4014 rc = ata_eh_maybe_retry_flush(dev);
4020 /* configure link power saving */
4021 if (link->lpm_policy != ap->target_lpm_policy) {
4022 rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
4027 /* this link is okay now */
4034 ata_eh_handle_dev_fail(dev, rc);
4036 if (ap->pflags & ATA_PFLAG_FROZEN) {
4037 /* PMP reset requires working host port.
4038 * Can't retry if it's frozen.
4040 if (sata_pmp_attached(ap))
4050 if (rc && r_failed_link)
4051 *r_failed_link = link;
4053 DPRINTK("EXIT, rc=%d\n", rc);
4058 * ata_eh_finish - finish up EH
4059 * @ap: host port to finish EH for
4061 * Recovery is complete. Clean up EH states and retry or finish
4062 * failed qcs.
4067 void ata_eh_finish(struct ata_port *ap)
4071 /* retry or finish qcs */
4072 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
4073 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
4075 if (!(qc->flags & ATA_QCFLAG_FAILED))
4079 /* FIXME: Once EH migration is complete,
4080 * generate sense data in this function,
4081 * considering both err_mask and tf.
4083 if (qc->flags & ATA_QCFLAG_RETRY)
4084 ata_eh_qc_retry(qc);
4086 ata_eh_qc_complete(qc);
4088 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
4089 ata_eh_qc_complete(qc);
4091 /* feed zero TF to sense generation */
4092 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
4093 ata_eh_qc_retry(qc);
4098 /* make sure nr_active_links is zero after EH */
4099 WARN_ON(ap->nr_active_links);
4100 ap->nr_active_links = 0;
4104 * ata_do_eh - do standard error handling
4105 * @ap: host port to handle error for
4107 * @prereset: prereset method (can be NULL)
4108 * @softreset: softreset method (can be NULL)
4109 * @hardreset: hardreset method (can be NULL)
4110 * @postreset: postreset method (can be NULL)
4112 * Perform standard error handling sequence.
4115 * Kernel thread context (may sleep).
4117 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
4118 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
4119 ata_postreset_fn_t postreset)
4121 struct ata_device *dev;
4127 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
4130 ata_for_each_dev(dev, &ap->link, ALL)
4131 ata_dev_disable(dev);
4138 * ata_std_error_handler - standard error handler
4139 * @ap: host port to handle error for
4141 * Standard error handler
4144 * Kernel thread context (may sleep).
4146 void ata_std_error_handler(struct ata_port *ap)
4148 struct ata_port_operations *ops = ap->ops;
4149 ata_reset_fn_t hardreset = ops->hardreset;
4151 /* ignore built-in hardreset if SCR access is not available */
4152 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
4155 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
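/*
 * Wiring sketch (illustrative; "foo" is a hypothetical LLD): most
 * drivers pick this handler up through their port operations rather
 * than calling it directly:
 *
 *	static struct ata_port_operations foo_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.error_handler	= ata_std_error_handler,
 *	};
 *
 * Drivers that need custom reset methods call ata_do_eh() with their
 * own prereset/softreset/hardreset/postreset callbacks instead.
 */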
4160 * ata_eh_handle_port_suspend - perform port suspend operation
4161 * @ap: port to suspend
4166 * Kernel thread context (may sleep).
4168 static void ata_eh_handle_port_suspend(struct ata_port *ap)
4170 unsigned long flags;
4172 struct ata_device *dev;
4174 /* are we suspending? */
4175 spin_lock_irqsave(ap->lock, flags);
4176 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4177 ap->pm_mesg.event & PM_EVENT_RESUME) {
4178 spin_unlock_irqrestore(ap->lock, flags);
4181 spin_unlock_irqrestore(ap->lock, flags);
4183 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
4186 * If we have a ZPODD attached, check its zero
4187 * power ready status before the port is frozen.
4188 * Only needed for runtime suspend.
4190 if (PMSG_IS_AUTO(ap->pm_mesg)) {
4191 ata_for_each_dev(dev, &ap->link, ENABLED) {
4192 if (zpodd_dev_enabled(dev))
4193 zpodd_on_suspend(dev);
4197 /* tell ACPI we're suspending */
4198 rc = ata_acpi_on_suspend(ap);
4203 ata_eh_freeze_port(ap);
4205 if (ap->ops->port_suspend)
4206 rc = ap->ops->port_suspend(ap, ap->pm_mesg);
4208 ata_acpi_set_state(ap, ap->pm_mesg);
4210 /* update the flags */
4211 spin_lock_irqsave(ap->lock, flags);
4213 ap->pflags &= ~ATA_PFLAG_PM_PENDING;
4215 ap->pflags |= ATA_PFLAG_SUSPENDED;
4216 else if (ap->pflags & ATA_PFLAG_FROZEN)
4217 ata_port_schedule_eh(ap);
4219 spin_unlock_irqrestore(ap->lock, flags);
4225 * ata_eh_handle_port_resume - perform port resume operation
4226 * @ap: port to resume
4231 * Kernel thread context (may sleep).
4233 static void ata_eh_handle_port_resume(struct ata_port *ap)
4235 struct ata_link *link;
4236 struct ata_device *dev;
4237 unsigned long flags;
4240 /* are we resuming? */
4241 spin_lock_irqsave(ap->lock, flags);
4242 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4243 !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
4244 spin_unlock_irqrestore(ap->lock, flags);
4247 spin_unlock_irqrestore(ap->lock, flags);
4249 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4252 * Error timestamps are in jiffies, which doesn't advance while
4253 * suspended, and PHY events during resume aren't too uncommon.
4254 * Combined, the two can lead to unnecessary speed downs if the
4255 * machine is suspended and resumed repeatedly. Clear the error
4256 * history.
4258 ata_for_each_link(link, ap, HOST_FIRST)
4259 ata_for_each_dev(dev, link, ALL)
4260 ata_ering_clear(&dev->ering);
4262 ata_acpi_set_state(ap, ap->pm_mesg);
4264 if (ap->ops->port_resume)
4265 rc = ap->ops->port_resume(ap);
4267 /* tell ACPI that we're resuming */
4268 ata_acpi_on_resume(ap);
4270 /* update the flags */
4271 spin_lock_irqsave(ap->lock, flags);
4272 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4273 spin_unlock_irqrestore(ap->lock, flags);
4275 #endif /* CONFIG_PM */