2 * libata-eh.c - libata error handling
4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/driver-api/libata.rst
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/kernel.h>
36 #include <linux/blkdev.h>
37 #include <linux/export.h>
38 #include <linux/pci.h>
39 #include <scsi/scsi.h>
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi_eh.h>
42 #include <scsi/scsi_device.h>
43 #include <scsi/scsi_cmnd.h>
44 #include <scsi/scsi_dbg.h>
45 #include "../scsi/scsi_transport_api.h"
47 #include <linux/libata.h>
49 #include <trace/events/libata.h>
53 /* speed down verdicts */
54 ATA_EH_SPDN_NCQ_OFF = (1 << 0),
55 ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
56 ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
57 ATA_EH_SPDN_KEEP_ERRORS = (1 << 3),
60 ATA_EFLAG_IS_IO = (1 << 0),
61 ATA_EFLAG_DUBIOUS_XFER = (1 << 1),
62 ATA_EFLAG_OLD_ER = (1 << 31),
64 /* error categories */
67 ATA_ECAT_TOUT_HSM = 2,
69 ATA_ECAT_DUBIOUS_NONE = 4,
70 ATA_ECAT_DUBIOUS_ATA_BUS = 5,
71 ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
72 ATA_ECAT_DUBIOUS_UNK_DEV = 7,
75 ATA_EH_CMD_DFL_TIMEOUT = 5000,
77 /* always put at least this amount of time between resets */
78 ATA_EH_RESET_COOL_DOWN = 5000,
80 /* Waiting in ->prereset can never be reliable. It's
81 * sometimes nice to wait there but it can't be depended upon;
82 * otherwise, we wouldn't be resetting. Just give it enough
83 * time for most drives to spin up.
85 ATA_EH_PRERESET_TIMEOUT = 10000,
86 ATA_EH_FASTDRAIN_INTERVAL = 3000,
90 /* probe speed down parameters, see ata_eh_schedule_probe() */
91 ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */
92 ATA_EH_PROBE_TRIALS = 2,
95 /* The following table determines how we sequence resets. Each entry
96 * represents timeout for that try. The first try can be soft or
97 * hardreset. All others are hardreset if available. In most cases
98 * the first reset w/ 10sec timeout should succeed. Following entries
99 * are mostly for error handling, hotplug and those outlier devices that
100 * take an exceptionally long time to recover from reset.
102 static const unsigned long ata_eh_reset_timeouts[] = {
103 10000, /* most drives spin up by 10sec */
104 10000, /* > 99% working drives spin up before 20sec */
105 35000, /* give > 30 secs of idleness for outlier devices */
106 5000, /* and sweet one last chance */
107 ULONG_MAX, /* > 1 min has elapsed, give up */
110 static const unsigned long ata_eh_identify_timeouts[] = {
111 5000, /* covers > 99% of successes and not too boring on failures */
112 10000, /* combined time till here is enough even for media access */
113 30000, /* for true idiots */
117 static const unsigned long ata_eh_revalidate_timeouts[] = {
118 15000, /* Some drives are slow to read log pages when waking-up */
119 15000, /* combined time till here is enough even for media access */
123 static const unsigned long ata_eh_flush_timeouts[] = {
124 15000, /* be generous with flush */
126 30000, /* and even more generous */
130 static const unsigned long ata_eh_other_timeouts[] = {
131 5000, /* same rationale as identify timeout */
133 /* but no merciful 30sec for other commands, it just isn't worth it */
137 struct ata_eh_cmd_timeout_ent {
139 const unsigned long *timeouts;
142 /* The following table determines timeouts to use for EH internal
143 * commands. Each table entry is a command class and matches the
144 * commands the entry applies to and the timeout table to use.
146 * On the retry after a command timed out, the next timeout value from
147 * the table is used. If the table doesn't contain further entries,
148 * the last value is used.
150 * ehc->cmd_timeout_idx keeps track of which timeout to use per
151 * command class, so if SET_FEATURES times out on the first try, the
152 * next try will use the second timeout value only for that class.
154 #define CMDS(cmds...) (const u8 []){ cmds, 0 }
155 static const struct ata_eh_cmd_timeout_ent
156 ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
157 { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
158 .timeouts = ata_eh_identify_timeouts, },
159 { .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT),
160 .timeouts = ata_eh_revalidate_timeouts, },
161 { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
162 .timeouts = ata_eh_other_timeouts, },
163 { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
164 .timeouts = ata_eh_other_timeouts, },
165 { .commands = CMDS(ATA_CMD_SET_FEATURES),
166 .timeouts = ata_eh_other_timeouts, },
167 { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
168 .timeouts = ata_eh_other_timeouts, },
169 { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
170 .timeouts = ata_eh_flush_timeouts },
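/*
 * Worked example (illustrative, not part of the original source): the first
 * internal IDENTIFY DEVICE issued to a device runs with a 5s timeout.  If it
 * times out, ata_internal_cmd_timed_out() advances the per-class index, so
 * the retry gets 10s and a further retry 30s; once the last entry of the
 * class is reached, that value keeps being used.
 */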
174 static void __ata_port_freeze(struct ata_port *ap);
176 static void ata_eh_handle_port_suspend(struct ata_port *ap);
177 static void ata_eh_handle_port_resume(struct ata_port *ap);
178 #else /* CONFIG_PM */
179 static void ata_eh_handle_port_suspend(struct ata_port *ap)
182 static void ata_eh_handle_port_resume(struct ata_port *ap)
184 #endif /* CONFIG_PM */
186 static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
187 const char *fmt, va_list args)
189 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
190 ATA_EH_DESC_LEN - ehi->desc_len,
195 * __ata_ehi_push_desc - push error description without adding separator
197 * @fmt: printf format string
199 * Format string according to @fmt and append it to @ehi->desc.
202 * spin_lock_irqsave(host lock)
204 void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
209 __ata_ehi_pushv_desc(ehi, fmt, args);
214 * ata_ehi_push_desc - push error description with separator
216 * @fmt: printf format string
218 * Format string according to @fmt and append it to @ehi->desc.
219 * If @ehi->desc is not empty, ", " is added in-between.
222 * spin_lock_irqsave(host lock)
224 void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
229 __ata_ehi_push_desc(ehi, ", ");
232 __ata_ehi_pushv_desc(ehi, fmt, args);
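/*
 * Usage sketch (illustrative, not from this file): an interrupt handler that
 * detects an error typically records what it saw and then freezes the port,
 * all under the host lock ("irq_stat" is a driver-local variable assumed
 * here):
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *	ata_ehi_clear_desc(ehi);
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ata_port_freeze(ap);
 */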
237 * ata_ehi_clear_desc - clean error description
243 * spin_lock_irqsave(host lock)
245 void ata_ehi_clear_desc(struct ata_eh_info *ehi)
252 * ata_port_desc - append port description
253 * @ap: target ATA port
254 * @fmt: printf format string
256 * Format string according to @fmt and append it to port
257 * description. If port description is not empty, " " is added
258 * in-between. This function is to be used while initializing
259 * ata_host. The description is printed on host registration.
264 void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
268 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
270 if (ap->link.eh_info.desc_len)
271 __ata_ehi_push_desc(&ap->link.eh_info, " ");
274 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
281 * ata_port_pbar_desc - append PCI BAR description
282 * @ap: target ATA port
283 * @bar: target PCI BAR
284 * @offset: offset into PCI BAR
285 * @name: name of the area
287 * If @offset is negative, this function formats a string which
288 * contains the name, address, size and type of the BAR and
289 * appends it to the port description. If @offset is zero or
290 * positive, only the name and the address at @offset are appended.
295 void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
298 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
300 unsigned long long start, len;
302 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
304 else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
307 start = (unsigned long long)pci_resource_start(pdev, bar);
308 len = (unsigned long long)pci_resource_len(pdev, bar);
311 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
313 ata_port_desc(ap, "%s 0x%llx", name,
314 start + (unsigned long long)offset);
317 #endif /* CONFIG_PCI */
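/*
 * Usage sketch (illustrative): a PCI LLDD can describe its BARs while
 * setting up the host; the BAR number and the 0x100/0x80 port stride below
 * are assumptions, roughly modeled on AHCI:
 *
 *	ata_port_pbar_desc(ap, bar, -1, "abar");
 *	ata_port_pbar_desc(ap, bar, 0x100 + ap->port_no * 0x80, "port");
 */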
319 static int ata_lookup_timeout_table(u8 cmd)
323 for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
326 for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
335 * ata_internal_cmd_timeout - determine timeout for an internal command
336 * @dev: target device
337 * @cmd: internal command to be issued
339 * Determine timeout for internal command @cmd for @dev.
345 * Determined timeout.
347 unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
349 struct ata_eh_context *ehc = &dev->link->eh_context;
350 int ent = ata_lookup_timeout_table(cmd);
354 return ATA_EH_CMD_DFL_TIMEOUT;
356 idx = ehc->cmd_timeout_idx[dev->devno][ent];
357 return ata_eh_cmd_timeout_table[ent].timeouts[idx];
361 * ata_internal_cmd_timed_out - notification for internal command timeout
362 * @dev: target device
363 * @cmd: internal command which timed out
365 * Notify EH that internal command @cmd for @dev timed out. This
366 * function should be called only for commands whose timeouts are
367 * determined using ata_internal_cmd_timeout().
372 void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
374 struct ata_eh_context *ehc = &dev->link->eh_context;
375 int ent = ata_lookup_timeout_table(cmd);
381 idx = ehc->cmd_timeout_idx[dev->devno][ent];
382 if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
383 ehc->cmd_timeout_idx[dev->devno][ent]++;
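/*
 * Calling pattern (illustrative): internal command issue is expected to pair
 * the two helpers above - pick the timeout before issuing and report expiry
 * so the next retry of the same command class waits longer:
 *
 *	timeout = ata_internal_cmd_timeout(dev, cmd);
 *	... issue cmd and wait up to timeout ...
 *	if (err_mask & AC_ERR_TIMEOUT)
 *		ata_internal_cmd_timed_out(dev, cmd);
 */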
386 static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
387 unsigned int err_mask)
389 struct ata_ering_entry *ent;
394 ering->cursor %= ATA_ERING_SIZE;
396 ent = &ering->ring[ering->cursor];
397 ent->eflags = eflags;
398 ent->err_mask = err_mask;
399 ent->timestamp = get_jiffies_64();
402 static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
404 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
411 int ata_ering_map(struct ata_ering *ering,
412 int (*map_fn)(struct ata_ering_entry *, void *),
416 struct ata_ering_entry *ent;
420 ent = &ering->ring[idx];
423 rc = map_fn(ent, arg);
426 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
427 } while (idx != ering->cursor);
432 static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
434 ent->eflags |= ATA_EFLAG_OLD_ER;
438 static void ata_ering_clear(struct ata_ering *ering)
440 ata_ering_map(ering, ata_ering_clear_cb, NULL);
443 static unsigned int ata_eh_dev_action(struct ata_device *dev)
445 struct ata_eh_context *ehc = &dev->link->eh_context;
447 return ehc->i.action | ehc->i.dev_action[dev->devno];
450 static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
451 struct ata_eh_info *ehi, unsigned int action)
453 struct ata_device *tdev;
456 ehi->action &= ~action;
457 ata_for_each_dev(tdev, link, ALL)
458 ehi->dev_action[tdev->devno] &= ~action;
460 /* doesn't make sense for port-wide EH actions */
461 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
463 /* break ehi->action into ehi->dev_action */
464 if (ehi->action & action) {
465 ata_for_each_dev(tdev, link, ALL)
466 ehi->dev_action[tdev->devno] |=
467 ehi->action & action;
468 ehi->action &= ~action;
471 /* turn off the specified per-dev action */
472 ehi->dev_action[dev->devno] &= ~action;
477 * ata_eh_acquire - acquire EH ownership
478 * @ap: ATA port to acquire EH ownership for
480 * Acquire EH ownership for @ap. This is the basic exclusion
481 * mechanism for ports sharing a host. Only one port hanging off
482 * the same host can claim the ownership of EH.
487 void ata_eh_acquire(struct ata_port *ap)
489 mutex_lock(&ap->host->eh_mutex);
490 WARN_ON_ONCE(ap->host->eh_owner);
491 ap->host->eh_owner = current;
495 * ata_eh_release - release EH ownership
496 * @ap: ATA port to release EH ownership for
498 * Release EH ownership for @ap. The caller must
499 * have acquired EH ownership using ata_eh_acquire() previously.
504 void ata_eh_release(struct ata_port *ap)
506 WARN_ON_ONCE(ap->host->eh_owner != current);
507 ap->host->eh_owner = NULL;
508 mutex_unlock(&ap->host->eh_mutex);
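/*
 * Usage sketch (illustrative): EH holds the ownership for the whole recovery
 * pass (see ata_scsi_port_error_handler() below), but long sleeps inside EH
 * may drop it temporarily so that sibling ports on the same host can make
 * progress:
 *
 *	ata_eh_release(ap);
 *	msleep(interval);
 *	ata_eh_acquire(ap);
 */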
512 * ata_scsi_timed_out - SCSI layer time out callback
513 * @cmd: timed out SCSI command
515 * Handles SCSI layer timeout. We race with normal completion of
516 * the qc for @cmd. If the qc is already gone, we lose and let
517 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
518 * timed out and EH should be invoked. Prevent ata_qc_complete()
519 * from finishing it by setting EH_SCHEDULED and return
522 * TODO: kill this function once old EH is gone.
525 * Called from timer context
528 * EH_HANDLED or EH_NOT_HANDLED
530 enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
532 struct Scsi_Host *host = cmd->device->host;
533 struct ata_port *ap = ata_shost_to_port(host);
535 struct ata_queued_cmd *qc;
536 enum blk_eh_timer_return ret;
540 if (ap->ops->error_handler) {
541 ret = BLK_EH_NOT_HANDLED;
545 ret = BLK_EH_HANDLED;
546 spin_lock_irqsave(ap->lock, flags);
547 qc = ata_qc_from_tag(ap, ap->link.active_tag);
549 WARN_ON(qc->scsicmd != cmd);
550 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
551 qc->err_mask |= AC_ERR_TIMEOUT;
552 ret = BLK_EH_NOT_HANDLED;
554 spin_unlock_irqrestore(ap->lock, flags);
557 DPRINTK("EXIT, ret=%d\n", ret);
560 EXPORT_SYMBOL(ata_scsi_timed_out);
562 static void ata_eh_unload(struct ata_port *ap)
564 struct ata_link *link;
565 struct ata_device *dev;
568 /* Restore SControl IPM and SPD for the next driver and
569 * disable attached devices.
571 ata_for_each_link(link, ap, PMP_FIRST) {
572 sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
573 ata_for_each_dev(dev, link, ALL)
574 ata_dev_disable(dev);
577 /* freeze and set UNLOADED */
578 spin_lock_irqsave(ap->lock, flags);
580 ata_port_freeze(ap); /* won't be thawed */
581 ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */
582 ap->pflags |= ATA_PFLAG_UNLOADED;
584 spin_unlock_irqrestore(ap->lock, flags);
588 * ata_scsi_error - SCSI layer error handler callback
589 * @host: SCSI host on which error occurred
591 * Handles SCSI-layer-thrown error events.
594 * Inherited from SCSI layer (none, can sleep)
599 void ata_scsi_error(struct Scsi_Host *host)
601 struct ata_port *ap = ata_shost_to_port(host);
603 LIST_HEAD(eh_work_q);
607 spin_lock_irqsave(host->host_lock, flags);
608 list_splice_init(&host->eh_cmd_q, &eh_work_q);
609 spin_unlock_irqrestore(host->host_lock, flags);
611 ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
613 /* If we raced normal completion and there is nothing to
614 recover (nr_timedout == 0), why exactly are we doing error recovery? */
615 ata_scsi_port_error_handler(host, ap);
617 /* finish or retry handled scmd's and clean up */
618 WARN_ON(!list_empty(&eh_work_q));
624 * ata_scsi_cmd_error_handler - error callback for a list of commands
625 * @host: scsi host containing the port
626 * @ap: ATA port within the host
627 * @eh_work_q: list of commands to process
629 * process the given list of commands and return those finished to the
630 * ap->eh_done_q. This function is the first part of the libata error
631 * handler which processes a given list of failed commands.
633 void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
634 struct list_head *eh_work_q)
639 /* make sure sff pio task is not running */
640 ata_sff_flush_pio_task(ap);
642 /* synchronize with host lock and sort out timeouts */
644 /* For new EH, all qcs are finished in one of three ways -
645 * normal completion, error completion, and SCSI timeout.
646 * Both completions can race against SCSI timeout. When normal
647 * completion wins, the qc never reaches EH. When error
648 * completion wins, the qc has ATA_QCFLAG_FAILED set.
650 * When SCSI timeout wins, things are a bit more complex.
651 * Normal or error completion can occur after the timeout but
652 * before this point. In such cases, both types of
653 * completions are honored. A scmd is determined to have
654 * timed out iff its associated qc is active and not failed.
656 spin_lock_irqsave(ap->lock, flags);
657 if (ap->ops->error_handler) {
658 struct scsi_cmnd *scmd, *tmp;
661 /* This must occur under the ap->lock as we don't want
662 a polled recovery to race the real interrupt handler
664 The lost_interrupt handler checks for any completed but
665 non-notified command and completes much like an IRQ handler.
667 We then fall into the error recovery code which will treat
668 this as if normal completion won the race */
670 if (ap->ops->lost_interrupt)
671 ap->ops->lost_interrupt(ap);
673 list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
674 struct ata_queued_cmd *qc;
676 for (i = 0; i < ATA_MAX_QUEUE; i++) {
677 qc = __ata_qc_from_tag(ap, i);
678 if (qc->flags & ATA_QCFLAG_ACTIVE &&
683 if (i < ATA_MAX_QUEUE) {
684 /* the scmd has an associated qc */
685 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
686 /* which hasn't failed yet, timeout */
687 qc->err_mask |= AC_ERR_TIMEOUT;
688 qc->flags |= ATA_QCFLAG_FAILED;
692 /* Normal completion occurred after
693 * SCSI timeout but before this point.
694 * Successfully complete it.
696 scmd->retries = scmd->allowed;
697 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
701 /* If we have timed out qcs, they belong to EH from
702 * this point but the state of the controller is
703 * unknown. Freeze the port to make sure the IRQ
704 * handler doesn't diddle with those qcs. This must
705 * be done atomically w.r.t. setting QCFLAG_FAILED.
708 __ata_port_freeze(ap);
711 /* initialize eh_tries */
712 ap->eh_tries = ATA_EH_MAX_TRIES;
714 spin_unlock_irqrestore(ap->lock, flags);
717 EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
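/*
 * Note (added for clarity): ata_scsi_error() above simply calls this
 * function followed by ata_scsi_port_error_handler().  Hosts that drive
 * several ATA ports behind a single Scsi_Host (libsas, for example) are
 * expected to call the two halves separately, once per port.
 */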
720 * ata_scsi_port_error_handler - recover the port after the commands
721 * @host: SCSI host containing the port
724 * Handle the recovery of the port @ap after all the commands
725 * have been recovered.
727 void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
731 /* invoke error handler */
732 if (ap->ops->error_handler) {
733 struct ata_link *link;
735 /* acquire EH ownership */
738 /* kill fast drain timer */
739 del_timer_sync(&ap->fastdrain_timer);
741 /* process port resume request */
742 ata_eh_handle_port_resume(ap);
744 /* fetch & clear EH info */
745 spin_lock_irqsave(ap->lock, flags);
747 ata_for_each_link(link, ap, HOST_FIRST) {
748 struct ata_eh_context *ehc = &link->eh_context;
749 struct ata_device *dev;
751 memset(&link->eh_context, 0, sizeof(link->eh_context));
752 link->eh_context.i = link->eh_info;
753 memset(&link->eh_info, 0, sizeof(link->eh_info));
755 ata_for_each_dev(dev, link, ENABLED) {
756 int devno = dev->devno;
758 ehc->saved_xfer_mode[devno] = dev->xfer_mode;
759 if (ata_ncq_enabled(dev))
760 ehc->saved_ncq_enabled |= 1 << devno;
764 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
765 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
766 ap->excl_link = NULL; /* don't maintain exclusion over EH */
768 spin_unlock_irqrestore(ap->lock, flags);
770 /* invoke EH, skip if unloading or suspended */
771 if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
772 ap->ops->error_handler(ap);
774 /* if unloading, commence suicide */
775 if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
776 !(ap->pflags & ATA_PFLAG_UNLOADED))
781 /* process port suspend request */
782 ata_eh_handle_port_suspend(ap);
784 /* Exception might have happened after ->error_handler
785 * recovered the port but before this point. Repeat
788 spin_lock_irqsave(ap->lock, flags);
790 if (ap->pflags & ATA_PFLAG_EH_PENDING) {
791 if (--ap->eh_tries) {
792 spin_unlock_irqrestore(ap->lock, flags);
796 "EH pending after %d tries, giving up\n",
798 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
801 /* this run is complete, make sure EH info is clear */
802 ata_for_each_link(link, ap, HOST_FIRST)
803 memset(&link->eh_info, 0, sizeof(link->eh_info));
805 /* end eh (clear host_eh_scheduled) while holding
806 * ap->lock such that if exception occurs after this
807 * point but before EH completion, SCSI midlayer will
812 spin_unlock_irqrestore(ap->lock, flags);
815 WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
816 ap->ops->eng_timeout(ap);
819 scsi_eh_flush_done_q(&ap->eh_done_q);
822 spin_lock_irqsave(ap->lock, flags);
824 if (ap->pflags & ATA_PFLAG_LOADING)
825 ap->pflags &= ~ATA_PFLAG_LOADING;
826 else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
827 schedule_delayed_work(&ap->hotplug_task, 0);
829 if (ap->pflags & ATA_PFLAG_RECOVERED)
830 ata_port_info(ap, "EH complete\n");
832 ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
834 /* tell wait_eh that we're done */
835 ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
836 wake_up_all(&ap->eh_wait_q);
838 spin_unlock_irqrestore(ap->lock, flags);
840 EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
843 * ata_port_wait_eh - Wait for the currently pending EH to complete
844 * @ap: Port to wait EH for
846 * Wait until the currently pending EH is complete.
849 * Kernel thread context (may sleep).
851 void ata_port_wait_eh(struct ata_port *ap)
857 spin_lock_irqsave(ap->lock, flags);
859 while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
860 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
861 spin_unlock_irqrestore(ap->lock, flags);
863 spin_lock_irqsave(ap->lock, flags);
865 finish_wait(&ap->eh_wait_q, &wait);
867 spin_unlock_irqrestore(ap->lock, flags);
869 /* make sure SCSI EH is complete */
870 if (scsi_host_in_recovery(ap->scsi_host)) {
875 EXPORT_SYMBOL_GPL(ata_port_wait_eh);
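/*
 * Usage sketch (illustrative): teardown paths that need the port quiescent
 * typically schedule EH and then wait for it to finish, roughly:
 *
 *	spin_lock_irqsave(ap->lock, flags);
 *	ap->pflags |= ATA_PFLAG_UNLOADING;
 *	ata_port_schedule_eh(ap);
 *	spin_unlock_irqrestore(ap->lock, flags);
 *
 *	ata_port_wait_eh(ap);
 */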
877 static int ata_eh_nr_in_flight(struct ata_port *ap)
882 /* count only non-internal commands */
883 for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
884 if (ata_qc_from_tag(ap, tag))
890 void ata_eh_fastdrain_timerfn(unsigned long arg)
892 struct ata_port *ap = (void *)arg;
896 spin_lock_irqsave(ap->lock, flags);
898 cnt = ata_eh_nr_in_flight(ap);
904 if (cnt == ap->fastdrain_cnt) {
907 /* No progress during the last interval, tag all
908 * in-flight qcs as timed out and freeze the port.
910 for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
911 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
913 qc->err_mask |= AC_ERR_TIMEOUT;
918 /* some qcs have finished, give it another chance */
919 ap->fastdrain_cnt = cnt;
920 ap->fastdrain_timer.expires =
921 ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
922 add_timer(&ap->fastdrain_timer);
926 spin_unlock_irqrestore(ap->lock, flags);
930 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
931 * @ap: target ATA port
932 * @fastdrain: activate fast drain
934 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
935 * is non-zero and EH wasn't pending before. Fast drain ensures
936 * that EH kicks in in a timely manner.
939 * spin_lock_irqsave(host lock)
941 static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
945 /* already scheduled? */
946 if (ap->pflags & ATA_PFLAG_EH_PENDING)
949 ap->pflags |= ATA_PFLAG_EH_PENDING;
954 /* do we have in-flight qcs? */
955 cnt = ata_eh_nr_in_flight(ap);
959 /* activate fast drain */
960 ap->fastdrain_cnt = cnt;
961 ap->fastdrain_timer.expires =
962 ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
963 add_timer(&ap->fastdrain_timer);
967 * ata_qc_schedule_eh - schedule qc for error handling
968 * @qc: command to schedule error handling for
970 * Schedule error handling for @qc. EH will kick in as soon as
971 * other commands are drained.
974 * spin_lock_irqsave(host lock)
976 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
978 struct ata_port *ap = qc->ap;
979 struct request_queue *q = qc->scsicmd->device->request_queue;
982 WARN_ON(!ap->ops->error_handler);
984 qc->flags |= ATA_QCFLAG_FAILED;
985 ata_eh_set_pending(ap, 1);
987 /* The following will fail if timeout has already expired.
988 * ata_scsi_error() takes care of such scmds on EH entry.
989 * Note that ATA_QCFLAG_FAILED is unconditionally set after
990 * this function completes.
992 spin_lock_irqsave(q->queue_lock, flags);
993 blk_abort_request(qc->scsicmd->request);
994 spin_unlock_irqrestore(q->queue_lock, flags);
998 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
999 * @ap: ATA port to schedule EH for
1001 * LOCKING: inherited from ata_port_schedule_eh
1002 * spin_lock_irqsave(host lock)
1004 void ata_std_sched_eh(struct ata_port *ap)
1006 WARN_ON(!ap->ops->error_handler);
1008 if (ap->pflags & ATA_PFLAG_INITIALIZING)
1011 ata_eh_set_pending(ap, 1);
1012 scsi_schedule_eh(ap->scsi_host);
1014 DPRINTK("port EH scheduled\n");
1016 EXPORT_SYMBOL_GPL(ata_std_sched_eh);
1019 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
1020 * @ap: ATA port to end EH for
1022 * In the libata object model there is a 1:1 mapping of ata_port to
1023 * shost, so host fields can be directly manipulated under ap->lock; in
1024 * the libsas case we need to hold a lock at the ha level to coordinate
1028 * spin_lock_irqsave(host lock)
1030 void ata_std_end_eh(struct ata_port *ap)
1032 struct Scsi_Host *host = ap->scsi_host;
1034 host->host_eh_scheduled = 0;
1036 EXPORT_SYMBOL(ata_std_end_eh);
1040 * ata_port_schedule_eh - schedule error handling without a qc
1041 * @ap: ATA port to schedule EH for
1043 * Schedule error handling for @ap. EH will kick in as soon as
1044 * all commands are drained.
1047 * spin_lock_irqsave(host lock)
1049 void ata_port_schedule_eh(struct ata_port *ap)
1051 /* see: ata_std_sched_eh, unless you know better */
1052 ap->ops->sched_eh(ap);
1055 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1057 int tag, nr_aborted = 0;
1059 WARN_ON(!ap->ops->error_handler);
1061 /* we're gonna abort all commands, no need for fast drain */
1062 ata_eh_set_pending(ap, 0);
1064 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1065 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1067 if (qc && (!link || qc->dev->link == link)) {
1068 qc->flags |= ATA_QCFLAG_FAILED;
1069 ata_qc_complete(qc);
1075 ata_port_schedule_eh(ap);
1081 * ata_link_abort - abort all qc's on the link
1082 * @link: ATA link to abort qc's for
1084 * Abort all active qc's active on @link and schedule EH.
1087 * spin_lock_irqsave(host lock)
1090 * Number of aborted qc's.
1092 int ata_link_abort(struct ata_link *link)
1094 return ata_do_link_abort(link->ap, link);
1098 * ata_port_abort - abort all qc's on the port
1099 * @ap: ATA port to abort qc's for
1101 * Abort all active qc's of @ap and schedule EH.
1104 * spin_lock_irqsave(host_set lock)
1107 * Number of aborted qc's.
1109 int ata_port_abort(struct ata_port *ap)
1111 return ata_do_link_abort(ap, NULL);
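/*
 * Usage sketch (illustrative): an interrupt handler that hits an
 * unrecoverable, port-wide error condition can abort everything in flight
 * while holding ap->lock and leave the rest to EH ("fatal" is a
 * hypothetical condition here):
 *
 *	if (fatal)
 *		ata_port_abort(ap);
 */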
1115 * __ata_port_freeze - freeze port
1116 * @ap: ATA port to freeze
1118 * This function is called when HSM violation or some other
1119 * condition disrupts normal operation of the port. Frozen port
1120 * is not allowed to perform any operation until the port is
1121 * thawed, which usually follows a successful reset.
1123 * ap->ops->freeze() callback can be used for freezing the port
1124 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
1125 * port cannot be frozen hardware-wise, the interrupt handler
1126 * must ack and clear interrupts unconditionally while the port
1130 * spin_lock_irqsave(host lock)
1132 static void __ata_port_freeze(struct ata_port *ap)
1134 WARN_ON(!ap->ops->error_handler);
1136 if (ap->ops->freeze)
1137 ap->ops->freeze(ap);
1139 ap->pflags |= ATA_PFLAG_FROZEN;
1141 DPRINTK("ata%u port frozen\n", ap->print_id);
1145 * ata_port_freeze - abort & freeze port
1146 * @ap: ATA port to freeze
1148 * Abort and freeze @ap. The freeze operation must be called
1149 * first, because some hardware requires special operations
1150 * before the taskfile registers are accessible.
1153 * spin_lock_irqsave(host lock)
1156 * Number of aborted commands.
1158 int ata_port_freeze(struct ata_port *ap)
1162 WARN_ON(!ap->ops->error_handler);
1164 __ata_port_freeze(ap);
1165 nr_aborted = ata_port_abort(ap);
1171 * sata_async_notification - SATA async notification handler
1172 * @ap: ATA port where async notification is received
1174 * Handler to be called when async notification via SDB FIS is
1175 * received. This function schedules EH if necessary.
1178 * spin_lock_irqsave(host lock)
1181 * 1 if EH is scheduled, 0 otherwise.
1183 int sata_async_notification(struct ata_port *ap)
1188 if (!(ap->flags & ATA_FLAG_AN))
1191 rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
1193 sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
1195 if (!sata_pmp_attached(ap) || rc) {
1196 /* PMP is not attached or SNTF is not available */
1197 if (!sata_pmp_attached(ap)) {
1198 /* PMP is not attached. Check whether ATAPI
1199 * AN is configured. If so, notify media
1202 struct ata_device *dev = ap->link.device;
1204 if ((dev->class == ATA_DEV_ATAPI) &&
1205 (dev->flags & ATA_DFLAG_AN))
1206 ata_scsi_media_change_notify(dev);
1209 /* PMP is attached but SNTF is not available.
1210 * ATAPI async media change notification is
1211 * not used. The PMP must be reporting PHY
1212 * status change, schedule EH.
1214 ata_port_schedule_eh(ap);
1218 /* PMP is attached and SNTF is available */
1219 struct ata_link *link;
1221 /* check and notify ATAPI AN */
1222 ata_for_each_link(link, ap, EDGE) {
1223 if (!(sntf & (1 << link->pmp)))
1226 if ((link->device->class == ATA_DEV_ATAPI) &&
1227 (link->device->flags & ATA_DFLAG_AN))
1228 ata_scsi_media_change_notify(link->device);
1231 /* If PMP is reporting that PHY status of some
1232 * downstream ports has changed, schedule EH.
1234 if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
1235 ata_port_schedule_eh(ap);
1244 * ata_eh_freeze_port - EH helper to freeze port
1245 * @ap: ATA port to freeze
1252 void ata_eh_freeze_port(struct ata_port *ap)
1254 unsigned long flags;
1256 if (!ap->ops->error_handler)
1259 spin_lock_irqsave(ap->lock, flags);
1260 __ata_port_freeze(ap);
1261 spin_unlock_irqrestore(ap->lock, flags);
1265 * ata_port_thaw_port - EH helper to thaw port
1266 * @ap: ATA port to thaw
1268 * Thaw frozen port @ap.
1273 void ata_eh_thaw_port(struct ata_port *ap)
1275 unsigned long flags;
1277 if (!ap->ops->error_handler)
1280 spin_lock_irqsave(ap->lock, flags);
1282 ap->pflags &= ~ATA_PFLAG_FROZEN;
1287 spin_unlock_irqrestore(ap->lock, flags);
1289 DPRINTK("ata%u port thawed\n", ap->print_id);
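/*
 * Note (added for clarity): a port is normally frozen when a serious error
 * is detected (see __ata_port_freeze() above) and stays frozen - with the
 * LLDD expected to mask or ack its interrupts - until EH thaws it here,
 * which usually happens after a successful reset.
 */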
1292 static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1297 static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1299 struct ata_port *ap = qc->ap;
1300 struct scsi_cmnd *scmd = qc->scsicmd;
1301 unsigned long flags;
1303 spin_lock_irqsave(ap->lock, flags);
1304 qc->scsidone = ata_eh_scsidone;
1305 __ata_qc_complete(qc);
1306 WARN_ON(ata_tag_valid(qc->tag));
1307 spin_unlock_irqrestore(ap->lock, flags);
1309 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1313 * ata_eh_qc_complete - Complete an active ATA command from EH
1314 * @qc: Command to complete
1316 * Indicate to the mid and upper layers that an ATA command has
1317 * completed. To be used from EH.
1319 void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1321 struct scsi_cmnd *scmd = qc->scsicmd;
1322 scmd->retries = scmd->allowed;
1323 __ata_eh_qc_complete(qc);
1327 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1328 * @qc: Command to retry
1330 * Indicate to the mid and upper layers that an ATA command
1331 * should be retried. To be used from EH.
1333 * SCSI midlayer limits the number of retries to scmd->allowed.
1334 * scmd->allowed is incremented for commands which get retried
1335 * due to unrelated failures (qc->err_mask is zero).
1337 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1339 struct scsi_cmnd *scmd = qc->scsicmd;
1342 __ata_eh_qc_complete(qc);
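/*
 * Usage sketch (illustrative): at the end of recovery the EH core walks the
 * failed qcs and finishes each one either way, roughly:
 *
 *	if (qc->flags & ATA_QCFLAG_RETRY)
 *		ata_eh_qc_retry(qc);
 *	else
 *		ata_eh_qc_complete(qc);
 */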
1346 * ata_dev_disable - disable ATA device
1347 * @dev: ATA device to disable
1354 void ata_dev_disable(struct ata_device *dev)
1356 if (!ata_dev_enabled(dev))
1359 if (ata_msg_drv(dev->link->ap))
1360 ata_dev_warn(dev, "disabled\n");
1361 ata_acpi_on_disable(dev);
1362 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
1365 /* From now till the next successful probe, ering is used to
1366 * track probe failures. Clear accumulated device error info.
1368 ata_ering_clear(&dev->ering);
1372 * ata_eh_detach_dev - detach ATA device
1373 * @dev: ATA device to detach
1380 void ata_eh_detach_dev(struct ata_device *dev)
1382 struct ata_link *link = dev->link;
1383 struct ata_port *ap = link->ap;
1384 struct ata_eh_context *ehc = &link->eh_context;
1385 unsigned long flags;
1387 ata_dev_disable(dev);
1389 spin_lock_irqsave(ap->lock, flags);
1391 dev->flags &= ~ATA_DFLAG_DETACH;
1393 if (ata_scsi_offline_dev(dev)) {
1394 dev->flags |= ATA_DFLAG_DETACHED;
1395 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1398 /* clear per-dev EH info */
1399 ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1400 ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
1401 ehc->saved_xfer_mode[dev->devno] = 0;
1402 ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1404 spin_unlock_irqrestore(ap->lock, flags);
1408 * ata_eh_about_to_do - about to perform eh_action
1409 * @link: target ATA link
1410 * @dev: target ATA dev for per-dev action (can be NULL)
1411 * @action: action about to be performed
1413 * Called just before performing EH actions to clear related bits
1414 * in @link->eh_info such that eh actions are not unnecessarily
1420 void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1421 unsigned int action)
1423 struct ata_port *ap = link->ap;
1424 struct ata_eh_info *ehi = &link->eh_info;
1425 struct ata_eh_context *ehc = &link->eh_context;
1426 unsigned long flags;
1428 spin_lock_irqsave(ap->lock, flags);
1430 ata_eh_clear_action(link, dev, ehi, action);
1432 /* About to take EH action, set RECOVERED. Ignore actions on
1433 * slave links as master will do them again.
1435 if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1436 ap->pflags |= ATA_PFLAG_RECOVERED;
1438 spin_unlock_irqrestore(ap->lock, flags);
1442 * ata_eh_done - EH action complete
1443 * @link: ATA link for which EH actions are complete
1444 * @dev: target ATA dev for per-dev action (can be NULL)
1445 * @action: action just completed
1447 * Called right after performing EH actions to clear related bits
1448 * in @link->eh_context.
1453 void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1454 unsigned int action)
1456 struct ata_eh_context *ehc = &link->eh_context;
1458 ata_eh_clear_action(link, dev, &ehc->i, action);
1462 * ata_err_string - convert err_mask to descriptive string
1463 * @err_mask: error mask to convert to string
1465 * Convert @err_mask to descriptive string. Errors are
1466 * prioritized according to severity and only the most severe
1467 * error is reported.
1473 * Descriptive string for @err_mask
1475 static const char *ata_err_string(unsigned int err_mask)
1477 if (err_mask & AC_ERR_HOST_BUS)
1478 return "host bus error";
1479 if (err_mask & AC_ERR_ATA_BUS)
1480 return "ATA bus error";
1481 if (err_mask & AC_ERR_TIMEOUT)
1483 if (err_mask & AC_ERR_HSM)
1484 return "HSM violation";
1485 if (err_mask & AC_ERR_SYSTEM)
1486 return "internal error";
1487 if (err_mask & AC_ERR_MEDIA)
1488 return "media error";
1489 if (err_mask & AC_ERR_INVALID)
1490 return "invalid argument";
1491 if (err_mask & AC_ERR_DEV)
1492 return "device error";
1493 return "unknown error";
1497 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
1498 * @dev: Device to read log page 10h from
1499 * @tag: Resulting tag of the failed command
1500 * @tf: Resulting taskfile registers of the failed command
1502 * Read log page 10h to obtain NCQ error details and clear error
1506 * Kernel thread context (may sleep).
1509 * 0 on success, -errno otherwise.
1511 static int ata_eh_read_log_10h(struct ata_device *dev,
1512 int *tag, struct ata_taskfile *tf)
1514 u8 *buf = dev->link->ap->sector_buf;
1515 unsigned int err_mask;
1519 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
1524 for (i = 0; i < ATA_SECT_SIZE; i++)
1527 ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
1533 *tag = buf[0] & 0x1f;
1535 tf->command = buf[2];
1536 tf->feature = buf[3];
1540 tf->device = buf[7];
1541 tf->hob_lbal = buf[8];
1542 tf->hob_lbam = buf[9];
1543 tf->hob_lbah = buf[10];
1544 tf->nsect = buf[12];
1545 tf->hob_nsect = buf[13];
1546 if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
1547 tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
1553 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1554 * @dev: target ATAPI device
1555 * @r_sense_key: out parameter for sense_key
1557 * Perform ATAPI TEST_UNIT_READY.
1560 * EH context (may sleep).
1563 * 0 on success, AC_ERR_* mask on failure.
1565 unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
1567 u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
1568 struct ata_taskfile tf;
1569 unsigned int err_mask;
1571 ata_tf_init(dev, &tf);
1573 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1574 tf.command = ATA_CMD_PACKET;
1575 tf.protocol = ATAPI_PROT_NODATA;
1577 err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
1578 if (err_mask == AC_ERR_DEV)
1579 *r_sense_key = tf.feature >> 4;
1584 * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
1585 * @qc: qc to perform REQUEST_SENSE_DATA_EXT to
1586 * @cmd: scsi command for which the sense code should be set
1588 * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
1589 * SENSE. This function is an EH helper.
1592 * Kernel thread context (may sleep).
1594 static void ata_eh_request_sense(struct ata_queued_cmd *qc,
1595 struct scsi_cmnd *cmd)
1597 struct ata_device *dev = qc->dev;
1598 struct ata_taskfile tf;
1599 unsigned int err_mask;
1601 if (qc->ap->pflags & ATA_PFLAG_FROZEN) {
1602 ata_dev_warn(dev, "sense data available but port frozen\n");
1606 if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID)
1609 if (!ata_id_sense_reporting_enabled(dev->id)) {
1610 ata_dev_warn(qc->dev, "sense data reporting disabled\n");
1614 DPRINTK("ATA request sense\n");
1616 ata_tf_init(dev, &tf);
1617 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1618 tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1619 tf.command = ATA_CMD_REQ_SENSE_DATA;
1620 tf.protocol = ATA_PROT_NODATA;
1622 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1623 /* Ignore err_mask; ATA_ERR might be set */
1624 if (tf.command & ATA_SENSE) {
1625 ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal);
1626 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1628 ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
1629 tf.command, err_mask);
1634 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1635 * @dev: device to perform REQUEST_SENSE to
1636 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
1637 * @dfl_sense_key: default sense key to use
1639 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
1640 * SENSE. This function is an EH helper.
1643 * Kernel thread context (may sleep).
1646 * 0 on success, AC_ERR_* mask on failure
1648 unsigned int atapi_eh_request_sense(struct ata_device *dev,
1649 u8 *sense_buf, u8 dfl_sense_key)
1651 u8 cdb[ATAPI_CDB_LEN] =
1652 { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
1653 struct ata_port *ap = dev->link->ap;
1654 struct ata_taskfile tf;
1656 DPRINTK("ATAPI request sense\n");
1658 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1660 /* initialize sense_buf with the error register,
1661 * for the case where they are -not- overwritten
1663 sense_buf[0] = 0x70;
1664 sense_buf[2] = dfl_sense_key;
1666 /* some devices time out if garbage left in tf */
1667 ata_tf_init(dev, &tf);
1669 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1670 tf.command = ATA_CMD_PACKET;
1672 /* is it pointless to prefer PIO for "safety reasons"? */
1673 if (ap->flags & ATA_FLAG_PIO_DMA) {
1674 tf.protocol = ATAPI_PROT_DMA;
1675 tf.feature |= ATAPI_PKT_DMA;
1677 tf.protocol = ATAPI_PROT_PIO;
1678 tf.lbam = SCSI_SENSE_BUFFERSIZE;
1682 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
1683 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1687 * ata_eh_analyze_serror - analyze SError for a failed port
1688 * @link: ATA link to analyze SError for
1690 * Analyze SError if available and further determine cause of
1696 static void ata_eh_analyze_serror(struct ata_link *link)
1698 struct ata_eh_context *ehc = &link->eh_context;
1699 u32 serror = ehc->i.serror;
1700 unsigned int err_mask = 0, action = 0;
1703 if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1704 err_mask |= AC_ERR_ATA_BUS;
1705 action |= ATA_EH_RESET;
1707 if (serror & SERR_PROTOCOL) {
1708 err_mask |= AC_ERR_HSM;
1709 action |= ATA_EH_RESET;
1711 if (serror & SERR_INTERNAL) {
1712 err_mask |= AC_ERR_SYSTEM;
1713 action |= ATA_EH_RESET;
1716 /* Determine whether a hotplug event has occurred. Both
1717 * SError.N/X are considered hotplug events for enabled or
1718 * host links. For disabled PMP links, only N bit is
1719 * considered as X bit is left at 1 for link plugging.
1721 if (link->lpm_policy > ATA_LPM_MAX_POWER)
1722 hotplug_mask = 0; /* hotplug doesn't work w/ LPM */
1723 else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1724 hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1726 hotplug_mask = SERR_PHYRDY_CHG;
1728 if (serror & hotplug_mask)
1729 ata_ehi_hotplugged(&ehc->i);
1731 ehc->i.err_mask |= err_mask;
1732 ehc->i.action |= action;
1736 * ata_eh_analyze_ncq_error - analyze NCQ error
1737 * @link: ATA link to analyze NCQ error for
1739 * Read log page 10h, determine the offending qc and acquire
1740 * error status TF. For NCQ device errors, all LLDDs have to do
1741 * is set AC_ERR_DEV in ehi->err_mask. This function takes
1745 * Kernel thread context (may sleep).
1747 void ata_eh_analyze_ncq_error(struct ata_link *link)
1749 struct ata_port *ap = link->ap;
1750 struct ata_eh_context *ehc = &link->eh_context;
1751 struct ata_device *dev = link->device;
1752 struct ata_queued_cmd *qc;
1753 struct ata_taskfile tf;
1756 /* if frozen, we can't do much */
1757 if (ap->pflags & ATA_PFLAG_FROZEN)
1760 /* is it NCQ device error? */
1761 if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1764 /* has LLDD analyzed already? */
1765 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1766 qc = __ata_qc_from_tag(ap, tag);
1768 if (!(qc->flags & ATA_QCFLAG_FAILED))
1775 /* okay, this error is ours */
1776 memset(&tf, 0, sizeof(tf));
1777 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1779 ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
1784 if (!(link->sactive & (1 << tag))) {
1785 ata_link_err(link, "log page 10h reported inactive tag %d\n",
1790 /* we've got the perpetrator, condemn it */
1791 qc = __ata_qc_from_tag(ap, tag);
1792 memcpy(&qc->result_tf, &tf, sizeof(tf));
1793 qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1794 qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1795 if (dev->class == ATA_DEV_ZAC &&
1796 ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
1797 char sense_key, asc, ascq;
1799 sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
1800 asc = (qc->result_tf.auxiliary >> 8) & 0xff;
1801 ascq = qc->result_tf.auxiliary & 0xff;
1802 ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq);
1803 ata_scsi_set_sense_information(dev, qc->scsicmd,
1805 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1808 ehc->i.err_mask &= ~AC_ERR_DEV;
1812 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1813 * @qc: qc to analyze
1814 * @tf: Taskfile registers to analyze
1816 * Analyze taskfile of @qc and further determine cause of
1817 * failure. This function also requests ATAPI sense data if
1821 * Kernel thread context (may sleep).
1824 * Determined recovery action
1826 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1827 const struct ata_taskfile *tf)
1829 unsigned int tmp, action = 0;
1830 u8 stat = tf->command, err = tf->feature;
1832 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1833 qc->err_mask |= AC_ERR_HSM;
1834 return ATA_EH_RESET;
1837 if (stat & (ATA_ERR | ATA_DF)) {
1838 qc->err_mask |= AC_ERR_DEV;
1840 * Sense data reporting does not work if the
1841 * device fault bit is set.
1849 switch (qc->dev->class) {
1851 if (stat & ATA_SENSE)
1852 ata_eh_request_sense(qc, qc->scsicmd);
1856 qc->err_mask |= AC_ERR_ATA_BUS;
1857 if (err & (ATA_UNC | ATA_AMNF))
1858 qc->err_mask |= AC_ERR_MEDIA;
1860 qc->err_mask |= AC_ERR_INVALID;
1864 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1865 tmp = atapi_eh_request_sense(qc->dev,
1866 qc->scsicmd->sense_buffer,
1867 qc->result_tf.feature >> 4);
1869 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1871 qc->err_mask |= tmp;
1875 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1876 int ret = scsi_check_sense(qc->scsicmd);
1878 * SUCCESS here means that the sense code could be
1879 * evaluated and should be passed to the upper layers
1880 * for correct evaluation.
1881 * FAILED means the sense code could not be interpreted
1882 * and the device would need to be reset.
1883 * NEEDS_RETRY and ADD_TO_MLQUEUE means that the
1884 * command would need to be retried.
1886 if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
1887 qc->flags |= ATA_QCFLAG_RETRY;
1888 qc->err_mask |= AC_ERR_OTHER;
1889 } else if (ret != SUCCESS) {
1890 qc->err_mask |= AC_ERR_HSM;
1893 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1894 action |= ATA_EH_RESET;
1899 static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1904 if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1908 base = ATA_ECAT_DUBIOUS_NONE;
1910 if (err_mask & AC_ERR_ATA_BUS)
1911 return base + ATA_ECAT_ATA_BUS;
1913 if (err_mask & AC_ERR_TIMEOUT)
1914 return base + ATA_ECAT_TOUT_HSM;
1916 if (eflags & ATA_EFLAG_IS_IO) {
1917 if (err_mask & AC_ERR_HSM)
1918 return base + ATA_ECAT_TOUT_HSM;
1920 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1921 return base + ATA_ECAT_UNK_DEV;
1927 struct speed_down_verdict_arg {
1930 int nr_errors[ATA_ECAT_NR];
1933 static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1935 struct speed_down_verdict_arg *arg = void_arg;
1938 if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1941 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
1943 arg->nr_errors[cat]++;
1949 * ata_eh_speed_down_verdict - Determine speed down verdict
1950 * @dev: Device of interest
1952 * This function examines error ring of @dev and determines
1953 * whether NCQ needs to be turned off, transfer speed should be
1954 * stepped down, or falling back to PIO is necessary.
1956 * ECAT_ATA_BUS : ATA_BUS error for any command
1958 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
1961 * ECAT_UNK_DEV : Unknown DEV error for IO commands
1963 * ECAT_DUBIOUS_* : Identical to above three but occurred while
1964 * data transfer hasn't been verified.
1968 * NCQ_OFF : Turn off NCQ.
1970 * SPEED_DOWN : Speed down transfer speed but don't fall back
1973 * FALLBACK_TO_PIO : Fall back to PIO.
1975 * Even if multiple verdicts are returned, only one action is
1976 * taken per error. An action triggered by non-DUBIOUS errors
1977 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
1978 * This is to expedite speed down decisions right after device is
1979 * initially configured.
1981 * The following are speed down rules. #1 and #2 deal with
1984 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
1985 * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
1987 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
1988 * occurred during last 5 mins, NCQ_OFF.
1990 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
1991 * occurred during last 5 mins, FALLBACK_TO_PIO
1993 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
1994 * during last 10 mins, NCQ_OFF.
1996 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
1997 * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
2000 * Inherited from caller.
2003 * OR of ATA_EH_SPDN_* flags.
2005 static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
2007 const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
2008 u64 j64 = get_jiffies_64();
2009 struct speed_down_verdict_arg arg;
2010 unsigned int verdict = 0;
2012 /* scan past 5 mins of error history */
2013 memset(&arg, 0, sizeof(arg));
2014 arg.since = j64 - min(j64, j5mins);
2015 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
2017 if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
2018 arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
2019 verdict |= ATA_EH_SPDN_SPEED_DOWN |
2020 ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
2022 if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
2023 arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
2024 verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
2026 if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
2027 arg.nr_errors[ATA_ECAT_TOUT_HSM] +
2028 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
2029 verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
2031 /* scan past 10 mins of error history */
2032 memset(&arg, 0, sizeof(arg));
2033 arg.since = j64 - min(j64, j10mins);
2034 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
2036 if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
2037 arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
2038 verdict |= ATA_EH_SPDN_NCQ_OFF;
2040 if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
2041 arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
2042 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
2043 verdict |= ATA_EH_SPDN_SPEED_DOWN;
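/*
 * Worked example (illustrative): four command timeouts on one device within
 * the last ten minutes count as four ECAT_TOUT_HSM errors, matching rule #4
 * (NCQ_OFF) and rule #5 (SPEED_DOWN) above, so the verdict is
 * NCQ_OFF | SPEED_DOWN.  None of the errors is DUBIOUS, so KEEP_ERRORS is
 * not set and the error ring is cleared once an action is actually taken
 * (see ata_eh_speed_down() below).
 */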
2049 * ata_eh_speed_down - record error and speed down if necessary
2050 * @dev: Failed device
2051 * @eflags: mask of ATA_EFLAG_* flags
2052 * @err_mask: err_mask of the error
2054 * Record error and examine error history to determine whether
2055 * adjusting transmission speed is necessary. It also sets
2056 * transmission limits appropriately if such adjustment is
2060 * Kernel thread context (may sleep).
2063 * Determined recovery action.
2065 static unsigned int ata_eh_speed_down(struct ata_device *dev,
2066 unsigned int eflags, unsigned int err_mask)
2068 struct ata_link *link = ata_dev_phys_link(dev);
2070 unsigned int verdict;
2071 unsigned int action = 0;
2073 /* don't bother if Cat-0 error */
2074 if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
2077 /* record error and determine whether speed down is necessary */
2078 ata_ering_record(&dev->ering, eflags, err_mask);
2079 verdict = ata_eh_speed_down_verdict(dev);
2082 if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
2083 (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
2084 ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
2085 dev->flags |= ATA_DFLAG_NCQ_OFF;
2086 ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
2091 if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
2092 /* speed down SATA link speed if possible */
2093 if (sata_down_spd_limit(link, 0) == 0) {
2094 action |= ATA_EH_RESET;
2098 /* lower transfer mode */
2099 if (dev->spdn_cnt < 2) {
2100 static const int dma_dnxfer_sel[] =
2101 { ATA_DNXFER_DMA, ATA_DNXFER_40C };
2102 static const int pio_dnxfer_sel[] =
2103 { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
2106 if (dev->xfer_shift != ATA_SHIFT_PIO)
2107 sel = dma_dnxfer_sel[dev->spdn_cnt];
2109 sel = pio_dnxfer_sel[dev->spdn_cnt];
2113 if (ata_down_xfermask_limit(dev, sel) == 0) {
2114 action |= ATA_EH_RESET;
2120 /* Fall back to PIO? Slowing down to PIO is meaningless for
2121 * SATA ATA devices. Consider it only for PATA and SATAPI.
2123 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
2124 (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
2125 (dev->xfer_shift != ATA_SHIFT_PIO)) {
2126 if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
2128 action |= ATA_EH_RESET;
2135 /* device has been slowed down, blow error history */
2136 if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
2137 ata_ering_clear(&dev->ering);
2142 * ata_eh_worth_retry - analyze error and decide whether to retry
2143 * @qc: qc to possibly retry
2145 * Look at the cause of the error and decide if a retry
2146 * might be useful or not. We don't want to retry media errors
2147 * because the drive itself has probably already taken 10-30 seconds
2148 * doing its own internal retries before reporting the failure.
2150 static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
2152 if (qc->err_mask & AC_ERR_MEDIA)
2153 return 0; /* don't retry media errors */
2154 if (qc->flags & ATA_QCFLAG_IO)
2155 return 1; /* otherwise retry anything from fs stack */
2156 if (qc->err_mask & AC_ERR_INVALID)
2157 return 0; /* don't retry these */
2158 return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */
2162 * ata_eh_link_autopsy - analyze error and determine recovery action
2163 * @link: host link to perform autopsy on
2165 * Analyze why @link failed and determine which recovery actions
2166 * are needed. This function also sets more detailed AC_ERR_*
2167 * values and fills sense data for ATAPI CHECK SENSE.
2170 * Kernel thread context (may sleep).
2172 static void ata_eh_link_autopsy(struct ata_link *link)
2174 struct ata_port *ap = link->ap;
2175 struct ata_eh_context *ehc = &link->eh_context;
2176 struct ata_device *dev;
2177 unsigned int all_err_mask = 0, eflags = 0;
2184 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2187 /* obtain and analyze SError */
2188 rc = sata_scr_read(link, SCR_ERROR, &serror);
2190 ehc->i.serror |= serror;
2191 ata_eh_analyze_serror(link);
2192 } else if (rc != -EOPNOTSUPP) {
2193 /* SError read failed, force reset and probing */
2194 ehc->i.probe_mask |= ATA_ALL_DEVICES;
2195 ehc->i.action |= ATA_EH_RESET;
2196 ehc->i.err_mask |= AC_ERR_OTHER;
2199 /* analyze NCQ failure */
2200 ata_eh_analyze_ncq_error(link);
2202 /* any real error trumps AC_ERR_OTHER */
2203 if (ehc->i.err_mask & ~AC_ERR_OTHER)
2204 ehc->i.err_mask &= ~AC_ERR_OTHER;
2206 all_err_mask |= ehc->i.err_mask;
2208 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2209 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2211 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2212 ata_dev_phys_link(qc->dev) != link)
2215 /* inherit upper level err_mask */
2216 qc->err_mask |= ehc->i.err_mask;
2219 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2221 /* DEV errors are probably spurious in case of ATA_BUS error */
2222 if (qc->err_mask & AC_ERR_ATA_BUS)
2223 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2226 /* any real error trumps unknown error */
2227 if (qc->err_mask & ~AC_ERR_OTHER)
2228 qc->err_mask &= ~AC_ERR_OTHER;
2231 * SENSE_VALID trumps dev/unknown error and revalidation. Upper
2232 * layers will determine whether the command is worth retrying
2233 * based on the sense data and device class/type. Otherwise,
2234 * determine directly if the command is worth retrying using its
2235 * error mask and flags.
2237 if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2238 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2239 else if (ata_eh_worth_retry(qc))
2240 qc->flags |= ATA_QCFLAG_RETRY;
2242 /* accumulate error info */
2243 ehc->i.dev = qc->dev;
2244 all_err_mask |= qc->err_mask;
2245 if (qc->flags & ATA_QCFLAG_IO)
2246 eflags |= ATA_EFLAG_IS_IO;
2247 trace_ata_eh_link_autopsy_qc(qc);
2250 /* enforce default EH actions */
2251 if (ap->pflags & ATA_PFLAG_FROZEN ||
2252 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2253 ehc->i.action |= ATA_EH_RESET;
2254 else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2255 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2256 ehc->i.action |= ATA_EH_REVALIDATE;
2258 /* If we have offending qcs and the associated failed device,
2259 * perform per-dev EH action only on the offending device.
2262 ehc->i.dev_action[ehc->i.dev->devno] |=
2263 ehc->i.action & ATA_EH_PERDEV_MASK;
2264 ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2267 /* propagate timeout to host link */
2268 if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2269 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2271 /* record error and consider speeding down */
2273 if (!dev && ((ata_link_max_devices(link) == 1 &&
2274 ata_dev_enabled(link->device))))
2278 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2279 eflags |= ATA_EFLAG_DUBIOUS_XFER;
2280 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2281 trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
2287 * ata_eh_autopsy - analyze error and determine recovery action
2288 * @ap: host port to perform autopsy on
2290 * Analyze all links of @ap and determine why they failed and
2291 * which recovery actions are needed.
2294 * Kernel thread context (may sleep).
2296 void ata_eh_autopsy(struct ata_port *ap)
2298 struct ata_link *link;
2300 ata_for_each_link(link, ap, EDGE)
2301 ata_eh_link_autopsy(link);
2303 /* Handle the frigging slave link. Autopsy is done similarly
2304 * but actions and flags are transferred over to the master
2305 * link and handled from there.
2307 if (ap->slave_link) {
2308 struct ata_eh_context *mehc = &ap->link.eh_context;
2309 struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2311 /* transfer control flags from master to slave */
2312 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2314 /* perform autopsy on the slave link */
2315 ata_eh_link_autopsy(ap->slave_link);
2317 /* transfer actions from slave to master and clear slave */
2318 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2319 mehc->i.action |= sehc->i.action;
2320 mehc->i.dev_action[1] |= sehc->i.dev_action[1];
2321 mehc->i.flags |= sehc->i.flags;
2322 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
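/* At this point every action gathered on the slave link has been moved
 * to, and acknowledged on, the master link, so the recovery steps below
 * only ever operate on ap->link.
 */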
2325 /* Autopsy of fanout ports can affect host link autopsy.
2326 * Perform host link autopsy last.
2328 if (sata_pmp_attached(ap))
2329 ata_eh_link_autopsy(&ap->link);
2333 * ata_get_cmd_descript - get description for ATA command
2334 * @command: ATA command code to get description for
2336 * Return a textual description of the given command, or NULL if the
2337 * command is not known.
2342 const char *ata_get_cmd_descript(u8 command)
2344 #ifdef CONFIG_ATA_VERBOSE_ERROR
2350 { ATA_CMD_DEV_RESET, "DEVICE RESET" },
2351 { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
2352 { ATA_CMD_STANDBY, "STANDBY" },
2353 { ATA_CMD_IDLE, "IDLE" },
2354 { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
2355 { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
2356 { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" },
2357 { ATA_CMD_NOP, "NOP" },
2358 { ATA_CMD_FLUSH, "FLUSH CACHE" },
2359 { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
2360 { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
2361 { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
2362 { ATA_CMD_SERVICE, "SERVICE" },
2363 { ATA_CMD_READ, "READ DMA" },
2364 { ATA_CMD_READ_EXT, "READ DMA EXT" },
2365 { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
2366 { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
2367 { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
2368 { ATA_CMD_WRITE, "WRITE DMA" },
2369 { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
2370 { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
2371 { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
2372 { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2373 { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
2374 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2375 { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
2376 { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
2377 { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
2378 { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
2379 { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
2380 { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
2381 { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
2382 { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
2383 { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
2384 { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
2385 { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
2386 { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
2387 { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
2388 { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
2389 { ATA_CMD_SET_FEATURES, "SET FEATURES" },
2390 { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
2391 { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
2392 { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
2393 { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
2394 { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
2395 { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
2396 { ATA_CMD_SLEEP, "SLEEP" },
2397 { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
2398 { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
2399 { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
2400 { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
2401 { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
2402 { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
2403 { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
2404 { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
2405 { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
2406 { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" },
2407 { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
2408 { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
2409 { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
2410 { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
2411 { ATA_CMD_PMP_READ, "READ BUFFER" },
2412 { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" },
2413 { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
2414 { ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" },
2415 { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
2416 { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
2417 { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
2418 { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
2419 { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
2420 { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
2421 { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
2422 { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
2423 { ATA_CMD_SMART, "SMART" },
2424 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
2425 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
2426 { ATA_CMD_DSM, "DATA SET MANAGEMENT" },
2427 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
2428 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
2429 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
2430 { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
2431 { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
2432 { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
2433 { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" },
2434 { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" },
2435 { ATA_CMD_ZAC_MGMT_IN, "ZAC MANAGEMENT IN" },
2436 { ATA_CMD_ZAC_MGMT_OUT, "ZAC MANAGEMENT OUT" },
2437 { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
2438 { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
2439 { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
2440 { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
2441 { ATA_CMD_RESTORE, "RECALIBRATE" },
2442 { 0, NULL } /* terminate list */
2446 for (i = 0; cmd_descr[i].text; i++)
2447 if (cmd_descr[i].command == command)
2448 return cmd_descr[i].text;
2453 EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
2456 * ata_eh_link_report - report error handling to user
2457 * @link: ATA link EH is going on
2459 * Report EH to user.
2464 static void ata_eh_link_report(struct ata_link *link)
2466 struct ata_port *ap = link->ap;
2467 struct ata_eh_context *ehc = &link->eh_context;
2468 const char *frozen, *desc;
2469 char tries_buf[16] = "";
2470 int tag, nr_failed = 0;
2472 if (ehc->i.flags & ATA_EHI_QUIET)
2476 if (ehc->i.desc[0] != '\0')
2479 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2480 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2482 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2483 ata_dev_phys_link(qc->dev) != link ||
2484 ((qc->flags & ATA_QCFLAG_QUIET) &&
2485 qc->err_mask == AC_ERR_DEV))
2487 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2493 if (!nr_failed && !ehc->i.err_mask)
2497 if (ap->pflags & ATA_PFLAG_FROZEN)
2500 if (ap->eh_tries < ATA_EH_MAX_TRIES)
2501 snprintf(tries_buf, sizeof(tries_buf), " t%d",
2505 ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
2506 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2507 ehc->i.err_mask, link->sactive, ehc->i.serror,
2508 ehc->i.action, frozen, tries_buf);
2510 ata_dev_err(ehc->i.dev, "%s\n", desc);
2512 ata_link_err(link, "exception Emask 0x%x "
2513 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2514 ehc->i.err_mask, link->sactive, ehc->i.serror,
2515 ehc->i.action, frozen, tries_buf);
2517 ata_link_err(link, "%s\n", desc);
2520 #ifdef CONFIG_ATA_VERBOSE_ERROR
2523 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2524 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2525 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2526 ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2527 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2528 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2529 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2530 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2531 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2532 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2533 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2534 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2535 ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2536 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2537 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2538 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2539 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2540 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2543 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2544 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2545 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2546 char data_buf[20] = "";
2547 char cdb_buf[70] = "";
2549 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2550 ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2553 if (qc->dma_dir != DMA_NONE) {
2554 static const char *dma_str[] = {
2555 [DMA_BIDIRECTIONAL] = "bidi",
2556 [DMA_TO_DEVICE] = "out",
2557 [DMA_FROM_DEVICE] = "in",
2559 const char *prot_str = NULL;
2561 switch (qc->tf.protocol) {
2562 case ATA_PROT_UNKNOWN:
2563 prot_str = "unknown";
2565 case ATA_PROT_NODATA:
2566 prot_str = "nodata";
2575 prot_str = "ncq dma";
2577 case ATA_PROT_NCQ_NODATA:
2578 prot_str = "ncq nodata";
2580 case ATAPI_PROT_NODATA:
2581 prot_str = "nodata";
2583 case ATAPI_PROT_PIO:
2586 case ATAPI_PROT_DMA:
2590 snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2591 prot_str, qc->nbytes, dma_str[qc->dma_dir]);
2594 if (ata_is_atapi(qc->tf.protocol)) {
2595 const u8 *cdb = qc->cdb;
2596 size_t cdb_len = qc->dev->cdb_len;
2599 cdb = qc->scsicmd->cmnd;
2600 cdb_len = qc->scsicmd->cmd_len;
2602 __scsi_format_command(cdb_buf, sizeof(cdb_buf),
2605 const char *descr = ata_get_cmd_descript(cmd->command);
2607 ata_dev_err(qc->dev, "failed command: %s\n",
2611 ata_dev_err(qc->dev,
2612 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2614 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2615 "Emask 0x%x (%s)%s\n",
2616 cmd->command, cmd->feature, cmd->nsect,
2617 cmd->lbal, cmd->lbam, cmd->lbah,
2618 cmd->hob_feature, cmd->hob_nsect,
2619 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2620 cmd->device, qc->tag, data_buf, cdb_buf,
2621 res->command, res->feature, res->nsect,
2622 res->lbal, res->lbam, res->lbah,
2623 res->hob_feature, res->hob_nsect,
2624 res->hob_lbal, res->hob_lbam, res->hob_lbah,
2625 res->device, qc->err_mask, ata_err_string(qc->err_mask),
2626 qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
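/* "cmd" is the taskfile that was issued and "res" is the device's
 * result taskfile; the second register group in each dump holds the
 * HOB (high-order byte) fields used by 48-bit commands.
 */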
2628 #ifdef CONFIG_ATA_VERBOSE_ERROR
2629 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2630 ATA_SENSE | ATA_ERR)) {
2631 if (res->command & ATA_BUSY)
2632 ata_dev_err(qc->dev, "status: { Busy }\n");
2634 ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
2635 res->command & ATA_DRDY ? "DRDY " : "",
2636 res->command & ATA_DF ? "DF " : "",
2637 res->command & ATA_DRQ ? "DRQ " : "",
2638 res->command & ATA_SENSE ? "SENSE " : "",
2639 res->command & ATA_ERR ? "ERR " : "");
2642 if (cmd->command != ATA_CMD_PACKET &&
2643 (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
2644 ATA_IDNF | ATA_ABORTED)))
2645 ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
2646 res->feature & ATA_ICRC ? "ICRC " : "",
2647 res->feature & ATA_UNC ? "UNC " : "",
2648 res->feature & ATA_AMNF ? "AMNF " : "",
2649 res->feature & ATA_IDNF ? "IDNF " : "",
2650 res->feature & ATA_ABORTED ? "ABRT " : "");
2656 * ata_eh_report - report error handling to user
2657 * @ap: ATA port to report EH about
2659 * Report EH to user.
2664 void ata_eh_report(struct ata_port *ap)
2666 struct ata_link *link;
2668 ata_for_each_link(link, ap, HOST_FIRST)
2669 ata_eh_link_report(link);
2672 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2673 unsigned int *classes, unsigned long deadline,
2676 struct ata_device *dev;
2679 ata_for_each_dev(dev, link, ALL)
2680 classes[dev->devno] = ATA_DEV_UNKNOWN;
2682 return reset(link, classes, deadline);
2685 static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
2687 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2691 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2696 int ata_eh_reset(struct ata_link *link, int classify,
2697 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2698 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2700 struct ata_port *ap = link->ap;
2701 struct ata_link *slave = ap->slave_link;
2702 struct ata_eh_context *ehc = &link->eh_context;
2703 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2704 unsigned int *classes = ehc->classes;
2705 unsigned int lflags = link->flags;
2706 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2707 int max_tries = 0, try = 0;
2708 struct ata_link *failed_link;
2709 struct ata_device *dev;
2710 unsigned long deadline, now;
2711 ata_reset_fn_t reset;
2712 unsigned long flags;
2719 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2721 if (link->flags & ATA_LFLAG_RST_ONCE)
2723 if (link->flags & ATA_LFLAG_NO_HRST)
2725 if (link->flags & ATA_LFLAG_NO_SRST)
2728 /* make sure each reset attempt is at least COOL_DOWN apart */
2729 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2731 WARN_ON(time_after(ehc->last_reset, now));
2732 deadline = ata_deadline(ehc->last_reset,
2733 ATA_EH_RESET_COOL_DOWN);
2734 if (time_before(now, deadline))
2735 schedule_timeout_uninterruptible(deadline - now);
2738 spin_lock_irqsave(ap->lock, flags);
2739 ap->pflags |= ATA_PFLAG_RESETTING;
2740 spin_unlock_irqrestore(ap->lock, flags);
2742 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2744 ata_for_each_dev(dev, link, ALL) {
2745 /* If we issue an SRST then an ATA drive (not ATAPI)
2746 * may change configuration and be in PIO0 timing. If
2747 * we do a hard reset (or are coming from power on)
2748 * this is true for ATA or ATAPI. Until we've set a
2749 * suitable controller mode we should not touch the
2750 * bus as we may be talking too fast.
2752 dev->pio_mode = XFER_PIO_0;
2753 dev->dma_mode = 0xff;
2755 /* If the controller has a pio mode setup function
2756 * then use it to set the chipset to rights. Don't
2757 * touch the DMA setup as that will be dealt with when
2758 * configuring devices.
2760 if (ap->ops->set_piomode)
2761 ap->ops->set_piomode(ap, dev);
2764 /* prefer hardreset */
2766 ehc->i.action &= ~ATA_EH_RESET;
2769 ehc->i.action |= ATA_EH_HARDRESET;
2770 } else if (softreset) {
2772 ehc->i.action |= ATA_EH_SOFTRESET;
2776 unsigned long deadline = ata_deadline(jiffies,
2777 ATA_EH_PRERESET_TIMEOUT);
2780 sehc->i.action &= ~ATA_EH_RESET;
2781 sehc->i.action |= ehc->i.action;
2784 rc = prereset(link, deadline);
2786 /* If present, do prereset on slave link too. Reset
2787 * is skipped iff both master and slave links report
2788 * -ENOENT or clear ATA_EH_RESET.
2790 if (slave && (rc == 0 || rc == -ENOENT)) {
2793 tmp = prereset(slave, deadline);
2797 ehc->i.action |= sehc->i.action;
2801 if (rc == -ENOENT) {
2802 ata_link_dbg(link, "port disabled--ignoring\n");
2803 ehc->i.action &= ~ATA_EH_RESET;
2805 ata_for_each_dev(dev, link, ALL)
2806 classes[dev->devno] = ATA_DEV_NONE;
2811 "prereset failed (errno=%d)\n",
2816 /* prereset() might have cleared ATA_EH_RESET. If so,
2817 * bang classes, thaw and return.
2819 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2820 ata_for_each_dev(dev, link, ALL)
2821 classes[dev->devno] = ATA_DEV_NONE;
2822 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2823 ata_is_host_link(link))
2824 ata_eh_thaw_port(ap);
2834 if (ata_is_host_link(link))
2835 ata_eh_freeze_port(ap);
2837 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2841 ata_link_info(link, "%s resetting link\n",
2842 reset == softreset ? "soft" : "hard");
2844 /* mark that this EH session started with reset */
2845 ehc->last_reset = jiffies;
2846 if (reset == hardreset)
2847 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2849 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2851 rc = ata_do_reset(link, reset, classes, deadline, true);
2852 if (rc && rc != -EAGAIN) {
2857 /* hardreset slave link if existent */
2858 if (slave && reset == hardreset) {
2862 ata_link_info(slave, "hard resetting link\n");
2864 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2865 tmp = ata_do_reset(slave, reset, classes, deadline,
2873 failed_link = slave;
2879 /* perform follow-up SRST if necessary */
2880 if (reset == hardreset &&
2881 ata_eh_followup_srst_needed(link, rc)) {
2886 "follow-up softreset required but no softreset available\n");
2892 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2893 rc = ata_do_reset(link, reset, classes, deadline, true);
2902 "no reset method available, skipping reset\n");
2903 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2904 lflags |= ATA_LFLAG_ASSUME_ATA;
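/* Without any reset method we cannot (re)classify the devices; unless
 * the link already forces a class, optimistically assume ATA so that
 * probing can still proceed.
 */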
2908 * Post-reset processing
2910 ata_for_each_dev(dev, link, ALL) {
2911 /* After the reset, the device state is PIO 0 and the
2912 * controller state is undefined. Reset also wakes up
2913 * drives from sleeping mode.
2915 dev->pio_mode = XFER_PIO_0;
2916 dev->flags &= ~ATA_DFLAG_SLEEPING;
2918 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2921 /* apply class override */
2922 if (lflags & ATA_LFLAG_ASSUME_ATA)
2923 classes[dev->devno] = ATA_DEV_ATA;
2924 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2925 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2928 /* record current link speed */
2929 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2930 link->sata_spd = (sstatus >> 4) & 0xf;
2931 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2932 slave->sata_spd = (sstatus >> 4) & 0xf;
2935 if (ata_is_host_link(link))
2936 ata_eh_thaw_port(ap);
2938 /* postreset() should clear hardware SError. Although SError
2939 * is cleared during link resume, clearing SError here is
2940 * necessary as some PHYs raise hotplug events after SRST.
2941 * This introduces a race condition where a hotplug event occurs
2942 * between reset and here. The race is mitigated by cross-checking
2943 * link onlineness and the classification result later.
2946 postreset(link, classes);
2948 postreset(slave, classes);
2951 /* clear cached SError */
2952 spin_lock_irqsave(link->ap->lock, flags);
2953 link->eh_info.serror = 0;
2955 slave->eh_info.serror = 0;
2956 spin_unlock_irqrestore(link->ap->lock, flags);
2958 if (ap->pflags & ATA_PFLAG_FROZEN)
2959 ata_eh_thaw_port(ap);
2962 * Make sure onlineness and classification result correspond.
2963 * Hotplug could have happened during reset and some
2964 * controllers fail to wait while a drive is spinning up after
2965 * being hotplugged, causing misdetection. By cross-checking
2966 * link on/offlineness and classification result, those
2967 * conditions can be reliably detected and retried.
2970 ata_for_each_dev(dev, link, ALL) {
2971 if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2972 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2973 ata_dev_dbg(dev, "link online but device misclassified\n");
2974 classes[dev->devno] = ATA_DEV_NONE;
2977 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2978 if (ata_class_enabled(classes[dev->devno]))
2980 "link offline, clearing class %d to NONE\n",
2981 classes[dev->devno]);
2982 classes[dev->devno] = ATA_DEV_NONE;
2983 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2985 "link status unknown, clearing UNKNOWN to NONE\n");
2986 classes[dev->devno] = ATA_DEV_NONE;
2990 if (classify && nr_unknown) {
2991 if (try < max_tries) {
2993 "link online but %d devices misclassified, retrying\n",
3000 "link online but %d devices misclassified, "
3001 "device detection might fail\n", nr_unknown);
3004 /* reset successful, schedule revalidation */
3005 ata_eh_done(link, NULL, ATA_EH_RESET);
3007 ata_eh_done(slave, NULL, ATA_EH_RESET);
3008 ehc->last_reset = jiffies; /* update to completion time */
3009 ehc->i.action |= ATA_EH_REVALIDATE;
3010 link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */
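/* Forcing lpm_policy back to UNKNOWN makes it differ from
 * ap->target_lpm_policy, so ata_eh_recover() will reprogram link power
 * management once the devices have been revalidated.
 */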
3014 /* clear hotplug flag */
3015 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
3017 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
3019 spin_lock_irqsave(ap->lock, flags);
3020 ap->pflags &= ~ATA_PFLAG_RESETTING;
3021 spin_unlock_irqrestore(ap->lock, flags);
3026 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
3027 if (!ata_is_host_link(link) &&
3028 sata_scr_read(link, SCR_STATUS, &sstatus))
3031 if (try >= max_tries) {
3033 * Thaw host port even if reset failed, so that the port
3034 * can be retried on the next phy event. This risks
3035 * repeated EH runs but seems to be a better tradeoff than
3036 * shutting down a port after a botched hotplug attempt.
3038 if (ata_is_host_link(link))
3039 ata_eh_thaw_port(ap);
3044 if (time_before(now, deadline)) {
3045 unsigned long delta = deadline - now;
3047 ata_link_warn(failed_link,
3048 "reset failed (errno=%d), retrying in %u secs\n",
3049 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
3053 delta = schedule_timeout_uninterruptible(delta);
3058 * While disks spin up behind a PMP, some controllers fail to send SRST.
3059 * They need to be reset - as well as the PMP - before retrying.
3061 if (rc == -ERESTART) {
3062 if (ata_is_host_link(link))
3063 ata_eh_thaw_port(ap);
3067 if (try == max_tries - 1) {
3068 sata_down_spd_limit(link, 0);
3070 sata_down_spd_limit(slave, 0);
3071 } else if (rc == -EPIPE)
3072 sata_down_spd_limit(failed_link, 0);
3079 static inline void ata_eh_pull_park_action(struct ata_port *ap)
3081 struct ata_link *link;
3082 struct ata_device *dev;
3083 unsigned long flags;
3086 * This function can be thought of as an extended version of
3087 * ata_eh_about_to_do() specially crafted to accommodate the
3088 * requirements of ATA_EH_PARK handling. Since the EH thread
3089 * does not leave the do {} while () loop in ata_eh_recover as
3090 * long as the timeout for a park request to *one* device on
3091 * the port has not expired, and since we still want to pick
3092 * up park requests to other devices on the same port or
3093 * timeout updates for the same device, we have to pull
3094 * ATA_EH_PARK actions from eh_info into eh_context.i
3095 * ourselves at the beginning of each pass over the loop.
3097 * Additionally, all write accesses to &ap->park_req_pending
3098 * through reinit_completion() (see below) or complete_all()
3099 * (see ata_scsi_park_store()) are protected by the host lock.
3100 * As a result, park_req_pending.done is zero on
3101 * exit from this function, i.e. when ATA_EH_PARK actions for
3102 * *all* devices on port ap have been pulled into the
3103 * respective eh_context structs. If, and only if,
3104 * park_req_pending.done is non-zero by the time we reach
3105 * wait_for_completion_timeout(), another ATA_EH_PARK action
3106 * has been scheduled for at least one of the devices on port
3107 * ap and we have to cycle over the do {} while () loop in
3108 * ata_eh_recover() again.
3111 spin_lock_irqsave(ap->lock, flags);
3112 reinit_completion(&ap->park_req_pending);
3113 ata_for_each_link(link, ap, EDGE) {
3114 ata_for_each_dev(dev, link, ALL) {
3115 struct ata_eh_info *ehi = &link->eh_info;
3117 link->eh_context.i.dev_action[dev->devno] |=
3118 ehi->dev_action[dev->devno] & ATA_EH_PARK;
3119 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
3122 spin_unlock_irqrestore(ap->lock, flags);
3125 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
3127 struct ata_eh_context *ehc = &dev->link->eh_context;
3128 struct ata_taskfile tf;
3129 unsigned int err_mask;
3131 ata_tf_init(dev, &tf);
3133 ehc->unloaded_mask |= 1 << dev->devno;
3134 tf.command = ATA_CMD_IDLEIMMEDIATE;
3140 ehc->unloaded_mask &= ~(1 << dev->devno);
3141 tf.command = ATA_CMD_CHK_POWER;
3144 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
3145 tf.protocol = ATA_PROT_NODATA;
3146 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
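/* For a park request, a successful IDLE IMMEDIATE with UNLOAD is
 * expected to report the 0xc4 signature in the LBA low register;
 * anything else is treated as a failed head unload below.
 */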
3147 if (park && (err_mask || tf.lbal != 0xc4)) {
3148 ata_dev_err(dev, "head unload failed!\n");
3149 ehc->unloaded_mask &= ~(1 << dev->devno);
3153 static int ata_eh_revalidate_and_attach(struct ata_link *link,
3154 struct ata_device **r_failed_dev)
3156 struct ata_port *ap = link->ap;
3157 struct ata_eh_context *ehc = &link->eh_context;
3158 struct ata_device *dev;
3159 unsigned int new_mask = 0;
3160 unsigned long flags;
3165 /* For PATA drive side cable detection to work, IDENTIFY must
3166 * be done backwards such that PDIAG- is released by the slave
3167 * device before the master device is identified.
3169 ata_for_each_dev(dev, link, ALL_REVERSE) {
3170 unsigned int action = ata_eh_dev_action(dev);
3171 unsigned int readid_flags = 0;
3173 if (ehc->i.flags & ATA_EHI_DID_RESET)
3174 readid_flags |= ATA_READID_POSTRESET;
3176 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
3177 WARN_ON(dev->class == ATA_DEV_PMP);
3179 if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3184 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
3185 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
3190 ata_eh_done(link, dev, ATA_EH_REVALIDATE);
3192 /* Configuration may have changed, reconfigure
3195 ehc->i.flags |= ATA_EHI_SETMODE;
3197 /* schedule the scsi_rescan_device() here */
3198 schedule_work(&(ap->scsi_rescan_task));
3199 } else if (dev->class == ATA_DEV_UNKNOWN &&
3200 ehc->tries[dev->devno] &&
3201 ata_class_enabled(ehc->classes[dev->devno])) {
3202 /* Temporarily set dev->class, it will be
3203 * permanently set once all configurations are
3204 * complete. This is necessary because new
3205 * device configuration is done in two
3208 dev->class = ehc->classes[dev->devno];
3210 if (dev->class == ATA_DEV_PMP)
3211 rc = sata_pmp_attach(dev);
3213 rc = ata_dev_read_id(dev, &dev->class,
3214 readid_flags, dev->id);
3216 /* read_id might have changed class, store and reset */
3217 ehc->classes[dev->devno] = dev->class;
3218 dev->class = ATA_DEV_UNKNOWN;
3222 /* clear error info accumulated during probe */
3223 ata_ering_clear(&dev->ering);
3224 new_mask |= 1 << dev->devno;
3227 /* IDENTIFY was issued to non-existent
3228 * device. No need to reset. Just
3229 * thaw and ignore the device.
3231 ata_eh_thaw_port(ap);
3239 /* PDIAG- should have been released, ask cable type if post-reset */
3240 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
3241 if (ap->ops->cable_detect)
3242 ap->cbl = ap->ops->cable_detect(ap);
3246 /* Configure new devices forward such that user doesn't see
3247 * device detection messages backwards.
3249 ata_for_each_dev(dev, link, ALL) {
3250 if (!(new_mask & (1 << dev->devno)))
3253 dev->class = ehc->classes[dev->devno];
3255 if (dev->class == ATA_DEV_PMP)
3258 ehc->i.flags |= ATA_EHI_PRINTINFO;
3259 rc = ata_dev_configure(dev);
3260 ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3262 dev->class = ATA_DEV_UNKNOWN;
3266 spin_lock_irqsave(ap->lock, flags);
3267 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3268 spin_unlock_irqrestore(ap->lock, flags);
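/* ATA_PFLAG_SCSI_HOTPLUG asks the SCSI side to pick up the newly
 * attached device once EH has finished.
 */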
3270 /* new device discovered, configure xfermode */
3271 ehc->i.flags |= ATA_EHI_SETMODE;
3277 *r_failed_dev = dev;
3278 DPRINTK("EXIT rc=%d\n", rc);
3283 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3284 * @link: link on which timings will be programmed
3285 * @r_failed_dev: out parameter for failed device
3287 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3288 * ata_set_mode() fails, pointer to the failing device is
3289 * returned in @r_failed_dev.
3292 * PCI/etc. bus probe sem.
3295 * 0 on success, negative errno otherwise
3297 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3299 struct ata_port *ap = link->ap;
3300 struct ata_device *dev;
3303 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3304 ata_for_each_dev(dev, link, ENABLED) {
3305 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3306 struct ata_ering_entry *ent;
3308 ent = ata_ering_top(&dev->ering);
3310 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3314 /* has private set_mode? */
3315 if (ap->ops->set_mode)
3316 rc = ap->ops->set_mode(link, r_failed_dev);
3318 rc = ata_do_set_mode(link, r_failed_dev);
3320 /* if transfer mode has changed, set DUBIOUS_XFER on device */
3321 ata_for_each_dev(dev, link, ENABLED) {
3322 struct ata_eh_context *ehc = &link->eh_context;
3323 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3324 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3326 if (dev->xfer_mode != saved_xfer_mode ||
3327 ata_ncq_enabled(dev) != saved_ncq)
3328 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
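/* The new mode is considered dubious until a transfer has been
 * verified; ata_eh_link_autopsy() folds this flag into
 * ATA_EFLAG_DUBIOUS_XFER when categorizing any error that follows.
 */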
3335 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3336 * @dev: ATAPI device to clear UA for
3338 * Resets and other operations can make an ATAPI device raise
3339 * UNIT ATTENTION which causes the next operation to fail. This
3340 * function clears UA.
3343 * EH context (may sleep).
3346 * 0 on success, -errno on failure.
3348 static int atapi_eh_clear_ua(struct ata_device *dev)
3352 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3353 u8 *sense_buffer = dev->link->ap->sector_buf;
3355 unsigned int err_mask;
3357 err_mask = atapi_eh_tur(dev, &sense_key);
3358 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3360 "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3365 if (!err_mask || sense_key != UNIT_ATTENTION)
3368 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3370 ata_dev_warn(dev, "failed to clear "
3371 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3376 ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3383 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3384 * @dev: ATA device which may need FLUSH retry
3386 * If @dev failed FLUSH, it needs to be reported to the upper layer
3387 * immediately, as it means that @dev failed to remap and has already
3388 * lost at least a sector; further FLUSH retries won't make any
3389 * difference to the lost sector. However, if FLUSH failed for
3390 * another reason, for example a transmission error, FLUSH needs to be retried.
3393 * This function determines whether FLUSH failure retry is
3394 * necessary and performs it if so.
3397 * 0 if EH can continue, -errno if EH needs to be repeated.
3399 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3401 struct ata_link *link = dev->link;
3402 struct ata_port *ap = link->ap;
3403 struct ata_queued_cmd *qc;
3404 struct ata_taskfile tf;
3405 unsigned int err_mask;
3408 /* did flush fail for this device? */
3409 if (!ata_tag_valid(link->active_tag))
3412 qc = __ata_qc_from_tag(ap, link->active_tag);
3413 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3414 qc->tf.command != ATA_CMD_FLUSH))
3417 /* if the device failed it, it should be reported to upper layers */
3418 if (qc->err_mask & AC_ERR_DEV)
3421 /* flush failed for some other reason, give it another shot */
3422 ata_tf_init(dev, &tf);
3424 tf.command = qc->tf.command;
3425 tf.flags |= ATA_TFLAG_DEVICE;
3426 tf.protocol = ATA_PROT_NODATA;
3428 ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
3429 tf.command, qc->err_mask);
3431 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3434 * FLUSH is complete but there's no way to
3435 * successfully complete a failed command from EH.
3436 * Making sure retry is allowed at least once and
3437 * retrying it should do the trick - whatever was in
3438 * the cache is already on the platter and this won't
3439 * cause an infinite loop.
3441 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3443 ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
3447 /* if device failed it, report it to upper layers */
3448 if (err_mask & AC_ERR_DEV) {
3449 qc->err_mask |= AC_ERR_DEV;
3451 if (!(ap->pflags & ATA_PFLAG_FROZEN))
3459 * ata_eh_set_lpm - configure SATA interface power management
3460 * @link: link to configure power management
3461 * @policy: the link power management policy
3462 * @r_failed_dev: out parameter for failed device
3464 * Enable SATA Interface power management. This will enable
3465 * Device Interface Power Management (DIPM) for min_power
3466 * policy, and then call driver specific callbacks for
3467 * enabling Host Initiated Power management.
3473 * 0 on success, -errno on failure.
3475 static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3476 struct ata_device **r_failed_dev)
3478 struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
3479 struct ata_eh_context *ehc = &link->eh_context;
3480 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3481 enum ata_lpm_policy old_policy = link->lpm_policy;
3482 bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
3483 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3484 unsigned int err_mask;
3487 /* if the link or host doesn't do LPM, noop */
3488 if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
3492 * DIPM is enabled only for MIN_POWER as some devices
3493 * misbehave when the host NACKs transition to SLUMBER. Order
3494 * device and link configurations such that the host always
3495 * allows DIPM requests.
3497 ata_for_each_dev(dev, link, ENABLED) {
3498 bool hipm = ata_id_has_hipm(dev->id);
3499 bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
3501 /* find the first enabled and LPM enabled devices */
3505 if (!lpm_dev && (hipm || dipm))
3508 hints &= ~ATA_LPM_EMPTY;
3510 hints &= ~ATA_LPM_HIPM;
3512 /* disable DIPM before changing link config */
3513 if (policy != ATA_LPM_MIN_POWER && dipm) {
3514 err_mask = ata_dev_set_feature(dev,
3515 SETFEATURES_SATA_DISABLE, SATA_DIPM);
3516 if (err_mask && err_mask != AC_ERR_DEV) {
3518 "failed to disable DIPM, Emask 0x%x\n",
3527 rc = ap->ops->set_lpm(link, policy, hints);
3528 if (!rc && ap->slave_link)
3529 rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
3531 rc = sata_pmp_set_lpm(link, policy, hints);
3534 * Attribute link config failure to the first (LPM) enabled
3535 * device on the link.
3538 if (rc == -EOPNOTSUPP) {
3539 link->flags |= ATA_LFLAG_NO_LPM;
3542 dev = lpm_dev ? lpm_dev : link_dev;
3547 * Low level driver acked the transition. Issue DIPM command
3548 * with the new policy set.
3550 link->lpm_policy = policy;
3551 if (ap && ap->slave_link)
3552 ap->slave_link->lpm_policy = policy;
3554 /* host config updated, enable DIPM if transitioning to MIN_POWER */
3555 ata_for_each_dev(dev, link, ENABLED) {
3556 if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
3557 ata_id_has_dipm(dev->id)) {
3558 err_mask = ata_dev_set_feature(dev,
3559 SETFEATURES_SATA_ENABLE, SATA_DIPM);
3560 if (err_mask && err_mask != AC_ERR_DEV) {
3562 "failed to enable DIPM, Emask 0x%x\n",
3570 link->last_lpm_change = jiffies;
3571 link->flags |= ATA_LFLAG_CHANGED;
3576 /* restore the old policy */
3577 link->lpm_policy = old_policy;
3578 if (ap && ap->slave_link)
3579 ap->slave_link->lpm_policy = old_policy;
3581 /* if no device or only one more chance is left, disable LPM */
3582 if (!dev || ehc->tries[dev->devno] <= 2) {
3583 ata_link_warn(link, "disabling LPM on the link\n");
3584 link->flags |= ATA_LFLAG_NO_LPM;
3587 *r_failed_dev = dev;
3591 int ata_link_nr_enabled(struct ata_link *link)
3593 struct ata_device *dev;
3596 ata_for_each_dev(dev, link, ENABLED)
3601 static int ata_link_nr_vacant(struct ata_link *link)
3603 struct ata_device *dev;
3606 ata_for_each_dev(dev, link, ALL)
3607 if (dev->class == ATA_DEV_UNKNOWN)
3612 static int ata_eh_skip_recovery(struct ata_link *link)
3614 struct ata_port *ap = link->ap;
3615 struct ata_eh_context *ehc = &link->eh_context;
3616 struct ata_device *dev;
3618 /* skip disabled links */
3619 if (link->flags & ATA_LFLAG_DISABLED)
3622 /* skip if explicitly requested */
3623 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3626 /* thaw frozen port and recover failed devices */
3627 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3630 /* reset at least once if reset is requested */
3631 if ((ehc->i.action & ATA_EH_RESET) &&
3632 !(ehc->i.flags & ATA_EHI_DID_RESET))
3635 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
3636 ata_for_each_dev(dev, link, ALL) {
3637 if (dev->class == ATA_DEV_UNKNOWN &&
3638 ehc->classes[dev->devno] != ATA_DEV_NONE)
3645 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3647 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3648 u64 now = get_jiffies_64();
3649 int *trials = void_arg;
3651 if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
3652 (ent->timestamp < now - min(now, interval)))
3659 static int ata_eh_schedule_probe(struct ata_device *dev)
3661 struct ata_eh_context *ehc = &dev->link->eh_context;
3662 struct ata_link *link = ata_dev_phys_link(dev);
3665 if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3666 (ehc->did_probe_mask & (1 << dev->devno)))
3669 ata_eh_detach_dev(dev);
3671 ehc->did_probe_mask |= (1 << dev->devno);
3672 ehc->i.action |= ATA_EH_RESET;
3673 ehc->saved_xfer_mode[dev->devno] = 0;
3674 ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3676 /* the link may be in deep sleep, wake it up */
3677 if (link->lpm_policy > ATA_LPM_MAX_POWER) {
3678 if (ata_is_host_link(link))
3679 link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
3682 sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
3686 /* Record and count probe trials on the ering. The specific
3687 * error mask used is irrelevant. Because a successful device
3688 * detection clears the ering, this count accumulates only if
3689 * there are consecutive failed probes.
3691 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3692 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3693 * forced to 1.5Gbps.
3695 * This is to work around cases where failed link speed
3696 * negotiation results in device misdetection leading to
3697 * infinite DEVXCHG or PHRDY CHG events.
3699 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3700 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3702 if (trials > ATA_EH_PROBE_TRIALS)
3703 sata_down_spd_limit(link, 1);
3708 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3710 struct ata_eh_context *ehc = &dev->link->eh_context;
3712 /* -EAGAIN from EH routine indicates retry without prejudice.
3713 * The requester is responsible for ensuring forward progress.
3716 ehc->tries[dev->devno]--;
3720 /* device missing or wrong IDENTIFY data, schedule probing */
3721 ehc->i.probe_mask |= (1 << dev->devno);
3723 /* give it just one more chance */
3724 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
3726 if (ehc->tries[dev->devno] == 1) {
3727 /* This is the last chance, better to slow
3728 * down than lose it.
3730 sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3731 if (dev->pio_mode > XFER_PIO_0)
3732 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3736 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3737 /* disable device if it has used up all its chances */
3738 ata_dev_disable(dev);
3740 /* detach if offline */
3741 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3742 ata_eh_detach_dev(dev);
3744 /* schedule probe if necessary */
3745 if (ata_eh_schedule_probe(dev)) {
3746 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3747 memset(ehc->cmd_timeout_idx[dev->devno], 0,
3748 sizeof(ehc->cmd_timeout_idx[dev->devno]));
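/* A device scheduled for re-probing starts over with a full set of
 * tries and default command timeouts.
 */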
3753 ehc->i.action |= ATA_EH_RESET;
3759 * ata_eh_recover - recover host port after error
3760 * @ap: host port to recover
3761 * @prereset: prereset method (can be NULL)
3762 * @softreset: softreset method (can be NULL)
3763 * @hardreset: hardreset method (can be NULL)
3764 * @postreset: postreset method (can be NULL)
3765 * @r_failed_link: out parameter for failed link
3767 * This is the alpha and omega, eum and yang, heart and soul of
3768 * libata exception handling. On entry, actions required to
3769 * recover each link and hotplug requests are recorded in the
3770 * link's eh_context. This function executes all the operations
3771 * with appropriate retries and fallbacks to resurrect failed
3772 * devices, detach goners and greet newcomers.
3775 * Kernel thread context (may sleep).
3778 * 0 on success, -errno on failure.
3780 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3781 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3782 ata_postreset_fn_t postreset,
3783 struct ata_link **r_failed_link)
3785 struct ata_link *link;
3786 struct ata_device *dev;
3788 unsigned long flags, deadline;
3792 /* prep for recovery */
3793 ata_for_each_link(link, ap, EDGE) {
3794 struct ata_eh_context *ehc = &link->eh_context;
3796 /* re-enable link? */
3797 if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3798 ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3799 spin_lock_irqsave(ap->lock, flags);
3800 link->flags &= ~ATA_LFLAG_DISABLED;
3801 spin_unlock_irqrestore(ap->lock, flags);
3802 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3805 ata_for_each_dev(dev, link, ALL) {
3806 if (link->flags & ATA_LFLAG_NO_RETRY)
3807 ehc->tries[dev->devno] = 1;
3809 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3811 /* collect port action mask recorded in dev actions */
3812 ehc->i.action |= ehc->i.dev_action[dev->devno] &
3813 ~ATA_EH_PERDEV_MASK;
3814 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3816 /* process hotplug request */
3817 if (dev->flags & ATA_DFLAG_DETACH)
3818 ata_eh_detach_dev(dev);
3820 /* schedule probe if necessary */
3821 if (!ata_dev_enabled(dev))
3822 ata_eh_schedule_probe(dev);
3829 /* if UNLOADING, finish immediately */
3830 if (ap->pflags & ATA_PFLAG_UNLOADING)
3834 ata_for_each_link(link, ap, EDGE) {
3835 struct ata_eh_context *ehc = &link->eh_context;
3837 /* skip EH if possible. */
3838 if (ata_eh_skip_recovery(link))
3841 ata_for_each_dev(dev, link, ALL)
3842 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3846 ata_for_each_link(link, ap, EDGE) {
3847 struct ata_eh_context *ehc = &link->eh_context;
3849 if (!(ehc->i.action & ATA_EH_RESET))
3852 rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3853 prereset, softreset, hardreset, postreset);
3855 ata_link_err(link, "reset failed, giving up\n");
3864 * clears ATA_EH_PARK in eh_info and resets
3865 * ap->park_req_pending
3867 ata_eh_pull_park_action(ap);
3870 ata_for_each_link(link, ap, EDGE) {
3871 ata_for_each_dev(dev, link, ALL) {
3872 struct ata_eh_context *ehc = &link->eh_context;
3875 if (dev->class != ATA_DEV_ATA &&
3876 dev->class != ATA_DEV_ZAC)
3878 if (!(ehc->i.dev_action[dev->devno] &
3881 tmp = dev->unpark_deadline;
3882 if (time_before(deadline, tmp))
3884 else if (time_before_eq(tmp, jiffies))
3886 if (ehc->unloaded_mask & (1 << dev->devno))
3889 ata_eh_park_issue_cmd(dev, 1);
3894 if (time_before_eq(deadline, now))
3898 deadline = wait_for_completion_timeout(&ap->park_req_pending,
3902 ata_for_each_link(link, ap, EDGE) {
3903 ata_for_each_dev(dev, link, ALL) {
3904 if (!(link->eh_context.unloaded_mask &
3908 ata_eh_park_issue_cmd(dev, 0);
3909 ata_eh_done(link, dev, ATA_EH_PARK);
3915 ata_for_each_link(link, ap, PMP_FIRST) {
3916 struct ata_eh_context *ehc = &link->eh_context;
3918 if (sata_pmp_attached(ap) && ata_is_host_link(link))
3921 /* revalidate existing devices and attach new ones */
3922 rc = ata_eh_revalidate_and_attach(link, &dev);
3926 /* if PMP got attached, return, pmp EH will take care of it */
3927 if (link->device->class == ATA_DEV_PMP) {
3932 /* configure transfer mode if necessary */
3933 if (ehc->i.flags & ATA_EHI_SETMODE) {
3934 rc = ata_set_mode(link, &dev);
3937 ehc->i.flags &= ~ATA_EHI_SETMODE;
3940 /* If reset has been issued, clear UA to avoid
3941 * disrupting the current users of the device.
3943 if (ehc->i.flags & ATA_EHI_DID_RESET) {
3944 ata_for_each_dev(dev, link, ALL) {
3945 if (dev->class != ATA_DEV_ATAPI)
3947 rc = atapi_eh_clear_ua(dev);
3950 if (zpodd_dev_enabled(dev))
3951 zpodd_post_poweron(dev);
3955 /* retry flush if necessary */
3956 ata_for_each_dev(dev, link, ALL) {
3957 if (dev->class != ATA_DEV_ATA &&
3958 dev->class != ATA_DEV_ZAC)
3960 rc = ata_eh_maybe_retry_flush(dev);
3966 /* configure link power saving */
3967 if (link->lpm_policy != ap->target_lpm_policy) {
3968 rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
3973 /* this link is okay now */
3980 ata_eh_handle_dev_fail(dev, rc);
3982 if (ap->pflags & ATA_PFLAG_FROZEN) {
3983 /* PMP reset requires working host port.
3984 * Can't retry if it's frozen.
3986 if (sata_pmp_attached(ap))
3996 if (rc && r_failed_link)
3997 *r_failed_link = link;
3999 DPRINTK("EXIT, rc=%d\n", rc);
4004 * ata_eh_finish - finish up EH
4005 * @ap: host port to finish EH for
4007 * Recovery is complete. Clean up EH states and retry or finish
4013 void ata_eh_finish(struct ata_port *ap)
4017 /* retry or finish qcs */
4018 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
4019 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
4021 if (!(qc->flags & ATA_QCFLAG_FAILED))
4025 /* FIXME: Once EH migration is complete,
4026 * generate sense data in this function,
4027 * considering both err_mask and tf.
4029 if (qc->flags & ATA_QCFLAG_RETRY)
4030 ata_eh_qc_retry(qc);
4032 ata_eh_qc_complete(qc);
4034 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
4035 ata_eh_qc_complete(qc);
4037 /* feed zero TF to sense generation */
4038 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
4039 ata_eh_qc_retry(qc);
4044 /* make sure nr_active_links is zero after EH */
4045 WARN_ON(ap->nr_active_links);
4046 ap->nr_active_links = 0;
4050 * ata_do_eh - do standard error handling
4051 * @ap: host port to handle error for
4053 * @prereset: prereset method (can be NULL)
4054 * @softreset: softreset method (can be NULL)
4055 * @hardreset: hardreset method (can be NULL)
4056 * @postreset: postreset method (can be NULL)
4058 * Perform standard error handling sequence.
4061 * Kernel thread context (may sleep).
4063 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
4064 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
4065 ata_postreset_fn_t postreset)
4067 struct ata_device *dev;
4073 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
4076 ata_for_each_dev(dev, &ap->link, ALL)
4077 ata_dev_disable(dev);
4084 * ata_std_error_handler - standard error handler
4085 * @ap: host port to handle error for
4087 * Standard error handler
4090 * Kernel thread context (may sleep).
4092 void ata_std_error_handler(struct ata_port *ap)
4094 struct ata_port_operations *ops = ap->ops;
4095 ata_reset_fn_t hardreset = ops->hardreset;
4097 /* ignore built-in hardreset if SCR access is not available */
4098 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
4101 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
4106 * ata_eh_handle_port_suspend - perform port suspend operation
4107 * @ap: port to suspend
4112 * Kernel thread context (may sleep).
4114 static void ata_eh_handle_port_suspend(struct ata_port *ap)
4116 unsigned long flags;
4118 struct ata_device *dev;
4120 /* are we suspending? */
4121 spin_lock_irqsave(ap->lock, flags);
4122 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4123 ap->pm_mesg.event & PM_EVENT_RESUME) {
4124 spin_unlock_irqrestore(ap->lock, flags);
4127 spin_unlock_irqrestore(ap->lock, flags);
4129 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
4132 * If we have a ZPODD attached, check its zero
4133 * power ready status before the port is frozen.
4134 * Only needed for runtime suspend.
4136 if (PMSG_IS_AUTO(ap->pm_mesg)) {
4137 ata_for_each_dev(dev, &ap->link, ENABLED) {
4138 if (zpodd_dev_enabled(dev))
4139 zpodd_on_suspend(dev);
4143 /* tell ACPI we're suspending */
4144 rc = ata_acpi_on_suspend(ap);
4149 ata_eh_freeze_port(ap);
4151 if (ap->ops->port_suspend)
4152 rc = ap->ops->port_suspend(ap, ap->pm_mesg);
4154 ata_acpi_set_state(ap, ap->pm_mesg);
4156 /* update the flags */
4157 spin_lock_irqsave(ap->lock, flags);
4159 ap->pflags &= ~ATA_PFLAG_PM_PENDING;
4161 ap->pflags |= ATA_PFLAG_SUSPENDED;
4162 else if (ap->pflags & ATA_PFLAG_FROZEN)
4163 ata_port_schedule_eh(ap);
4165 spin_unlock_irqrestore(ap->lock, flags);
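/* On success the port stays marked SUSPENDED; if the low-level suspend
 * failed while the port is frozen, EH is rescheduled so the port is not
 * left frozen indefinitely.
 */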
4171 * ata_eh_handle_port_resume - perform port resume operation
4172 * @ap: port to resume
4177 * Kernel thread context (may sleep).
4179 static void ata_eh_handle_port_resume(struct ata_port *ap)
4181 struct ata_link *link;
4182 struct ata_device *dev;
4183 unsigned long flags;
4185 /* are we resuming? */
4186 spin_lock_irqsave(ap->lock, flags);
4187 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4188 !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
4189 spin_unlock_irqrestore(ap->lock, flags);
4192 spin_unlock_irqrestore(ap->lock, flags);
4194 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4197 * Error timestamps are in jiffies, which doesn't advance while the
4198 * machine is suspended, and PHY events during resume aren't uncommon.
4199 * Combined, the two can lead to unnecessary speed-downs if the
4200 * machine is suspended and resumed repeatedly.
4201 * Clear the error history.
4203 ata_for_each_link(link, ap, HOST_FIRST)
4204 ata_for_each_dev(dev, link, ALL)
4205 ata_ering_clear(&dev->ering);
4207 ata_acpi_set_state(ap, ap->pm_mesg);
4209 if (ap->ops->port_resume)
4210 ap->ops->port_resume(ap);
4212 /* tell ACPI that we're resuming */
4213 ata_acpi_on_resume(ap);
4215 /* update the flags */
4216 spin_lock_irqsave(ap->lock, flags);
4217 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4218 spin_unlock_irqrestore(ap->lock, flags);
4220 #endif /* CONFIG_PM */