2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5 * Copyright (C) 2012-2014 LSI Corporation
6 * Copyright (C) 2013-2014 Avago Technologies
7 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/raid_class.h>
56 #include <linux/blk-mq-pci.h>
57 #include <asm/unaligned.h>
59 #include "mpt3sas_base.h"
61 #define RAID_CHANNEL 1
63 #define PCIE_CHANNEL 2
66 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
67 struct _sas_node *sas_expander);
68 static void _firmware_event_work(struct work_struct *work);
70 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
71 struct _sas_device *sas_device);
72 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
73 u8 retry_count, u8 is_pd);
74 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
75 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
76 struct _pcie_device *pcie_device);
78 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
79 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
80 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
82 /* global parameters */
83 LIST_HEAD(mpt3sas_ioc_list);
84 /* global ioc lock for list operations */
85 DEFINE_SPINLOCK(gioc_lock);
87 MODULE_AUTHOR(MPT3SAS_AUTHOR);
88 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
91 MODULE_ALIAS("mpt2sas");
93 /* local parameters */
94 static u8 scsi_io_cb_idx = -1;
95 static u8 tm_cb_idx = -1;
96 static u8 ctl_cb_idx = -1;
97 static u8 base_cb_idx = -1;
98 static u8 port_enable_cb_idx = -1;
99 static u8 transport_cb_idx = -1;
100 static u8 scsih_cb_idx = -1;
101 static u8 config_cb_idx = -1;
105 static u8 tm_tr_cb_idx = -1 ;
106 static u8 tm_tr_volume_cb_idx = -1 ;
107 static u8 tm_sas_control_cb_idx = -1;
109 /* command line options */
110 static u32 logging_level;
111 MODULE_PARM_DESC(logging_level,
112 " bits for enabling additional logging info (default=0)");
115 static ushort max_sectors = 0xFFFF;
116 module_param(max_sectors, ushort, 0444);
117 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
120 static int missing_delay[2] = {-1, -1};
121 module_param_array(missing_delay, int, NULL, 0444);
122 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
124 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
125 #define MPT3SAS_MAX_LUN (16895)
126 static u64 max_lun = MPT3SAS_MAX_LUN;
127 module_param(max_lun, ullong, 0444);
128 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
130 static ushort hbas_to_enumerate;
131 module_param(hbas_to_enumerate, ushort, 0444);
132 MODULE_PARM_DESC(hbas_to_enumerate,
133 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
134 1 - enumerates only SAS 2.0 generation HBAs\n \
135 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
137 /* diag_buffer_enable is bitwise
139 * bit 1 set = SNAPSHOT
140 * bit 2 set = EXTENDED
142 * Either bit can be set, or both
144 static int diag_buffer_enable = -1;
145 module_param(diag_buffer_enable, int, 0444);
146 MODULE_PARM_DESC(diag_buffer_enable,
147 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
148 static int disable_discovery = -1;
149 module_param(disable_discovery, int, 0444);
150 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
153 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
154 static int prot_mask = -1;
155 module_param(prot_mask, int, 0444);
156 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
158 static bool enable_sdev_max_qd;
159 module_param(enable_sdev_max_qd, bool, 0444);
160 MODULE_PARM_DESC(enable_sdev_max_qd,
161 "Enable sdev max qd as can_queue, def=disabled(0)");
163 static int multipath_on_hba = -1;
164 module_param(multipath_on_hba, int, 0);
165 MODULE_PARM_DESC(multipath_on_hba,
166 "Multipath support to add same target device\n\t\t"
167 "as many times as it is visible to HBA from various paths\n\t\t"
169 "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
170 "\t SAS 3.5 HBA - This will be enabled)");
172 static int host_tagset_enable = 1;
173 module_param(host_tagset_enable, int, 0444);
174 MODULE_PARM_DESC(host_tagset_enable,
175 "Shared host tagset enable/disable Default: enable(1)");
177 /* raid transport support */
178 static struct raid_template *mpt3sas_raid_template;
179 static struct raid_template *mpt2sas_raid_template;
183 * struct sense_info - common structure for obtaining sense keys
185 * @asc: additional sense code
186 * @ascq: additional sense code qualifier
194 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
195 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
196 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
197 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
198 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
200 * struct fw_event_work - firmware event struct
201 * @list: link list framework
202 * @work: work object (ioc->fault_reset_work_q)
203 * @ioc: per adapter object
204 * @device_handle: device handle
205 * @VF_ID: virtual function id
206 * @VP_ID: virtual port id
207 * @ignore: flag meaning this event has been marked to ignore
208 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
209 * @refcount: kref for this event
210 * @event_data: reply event data payload follows
212 * This object stored on ioc->fw_event_list.
214 struct fw_event_work {
215 struct list_head list;
216 struct work_struct work;
218 struct MPT3SAS_ADAPTER *ioc;
224 struct kref refcount;
225 char event_data[] __aligned(4);
228 static void fw_event_work_free(struct kref *r)
230 kfree(container_of(r, struct fw_event_work, refcount));
233 static void fw_event_work_get(struct fw_event_work *fw_work)
235 kref_get(&fw_work->refcount);
238 static void fw_event_work_put(struct fw_event_work *fw_work)
240 kref_put(&fw_work->refcount, fw_event_work_free);
243 static struct fw_event_work *alloc_fw_event_work(int len)
245 struct fw_event_work *fw_event;
247 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
251 kref_init(&fw_event->refcount);
256 * struct _scsi_io_transfer - scsi io transfer
257 * @handle: sas device handle (assigned by firmware)
258 * @is_raid: flag set for hidden raid components
259 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
260 * @data_length: data transfer length
261 * @data_dma: dma pointer to data
264 * @cdb_length: cdb length
266 * @timeout: timeout for this command
267 * @VF_ID: virtual function id
268 * @VP_ID: virtual port id
269 * @valid_reply: flag set for reply message
270 * @sense_length: sense length
271 * @ioc_status: ioc status
272 * @scsi_state: scsi state
273 * @scsi_status: scsi staus
274 * @log_info: log information
275 * @transfer_length: data length transfer when there is a reply message
277 * Used for sending internal scsi commands to devices within this module.
278 * Refer to _scsi_send_scsi_io().
280 struct _scsi_io_transfer {
283 enum dma_data_direction dir;
286 u8 sense[SCSI_SENSE_BUFFERSIZE];
294 /* the following bits are only valid when 'valid_reply = 1' */
304 * _scsih_set_debug_level - global setting of ioc->logging_level.
308 * Note: The logging levels are defined in mpt3sas_debug.h.
311 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
313 int ret = param_set_int(val, kp);
314 struct MPT3SAS_ADAPTER *ioc;
319 pr_info("setting logging_level(0x%08x)\n", logging_level);
320 spin_lock(&gioc_lock);
321 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
322 ioc->logging_level = logging_level;
323 spin_unlock(&gioc_lock);
326 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
327 &logging_level, 0644);
330 * _scsih_srch_boot_sas_address - search based on sas_address
331 * @sas_address: sas address
332 * @boot_device: boot device object from bios page 2
334 * Return: 1 when there's a match, 0 means no match.
337 _scsih_srch_boot_sas_address(u64 sas_address,
338 Mpi2BootDeviceSasWwid_t *boot_device)
340 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
344 * _scsih_srch_boot_device_name - search based on device name
345 * @device_name: device name specified in INDENTIFY fram
346 * @boot_device: boot device object from bios page 2
348 * Return: 1 when there's a match, 0 means no match.
351 _scsih_srch_boot_device_name(u64 device_name,
352 Mpi2BootDeviceDeviceName_t *boot_device)
354 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
358 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
359 * @enclosure_logical_id: enclosure logical id
360 * @slot_number: slot number
361 * @boot_device: boot device object from bios page 2
363 * Return: 1 when there's a match, 0 means no match.
366 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
367 Mpi2BootDeviceEnclosureSlot_t *boot_device)
369 return (enclosure_logical_id == le64_to_cpu(boot_device->
370 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
371 SlotNumber)) ? 1 : 0;
375 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
376 * port number from port list
377 * @ioc: per adapter object
378 * @port_id: port number
379 * @bypass_dirty_port_flag: when set look the matching hba port entry even
380 * if hba port entry is marked as dirty.
382 * Search for hba port entry corresponding to provided port number,
383 * if available return port object otherwise return NULL.
386 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
387 u8 port_id, u8 bypass_dirty_port_flag)
389 struct hba_port *port, *port_next;
392 * When multipath_on_hba is disabled then
393 * search the hba_port entry using default
396 if (!ioc->multipath_on_hba)
397 port_id = MULTIPATH_DISABLED_PORT_ID;
399 list_for_each_entry_safe(port, port_next,
400 &ioc->port_table_list, list) {
401 if (port->port_id != port_id)
403 if (bypass_dirty_port_flag)
405 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
411 * Allocate hba_port object for default port id (i.e. 255)
412 * when multipath_on_hba is disabled for the HBA.
413 * And add this object to port_table_list.
415 if (!ioc->multipath_on_hba) {
416 port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
420 port->port_id = port_id;
422 "hba_port entry: %p, port: %d is added to hba_port list\n",
423 port, port->port_id);
424 list_add_tail(&port->list,
425 &ioc->port_table_list);
432 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
433 * @ioc: per adapter object
434 * @port: hba_port object
437 * Return virtual_phy object corresponding to phy number.
440 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
441 struct hba_port *port, u32 phy)
443 struct virtual_phy *vphy, *vphy_next;
445 if (!port->vphys_mask)
448 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
449 if (vphy->phy_mask & (1 << phy))
456 * _scsih_is_boot_device - search for matching boot device.
457 * @sas_address: sas address
458 * @device_name: device name specified in INDENTIFY fram
459 * @enclosure_logical_id: enclosure logical id
461 * @form: specifies boot device form
462 * @boot_device: boot device object from bios page 2
464 * Return: 1 when there's a match, 0 means no match.
467 _scsih_is_boot_device(u64 sas_address, u64 device_name,
468 u64 enclosure_logical_id, u16 slot, u8 form,
469 Mpi2BiosPage2BootDevice_t *boot_device)
474 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
477 rc = _scsih_srch_boot_sas_address(
478 sas_address, &boot_device->SasWwid);
480 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
481 if (!enclosure_logical_id)
483 rc = _scsih_srch_boot_encl_slot(
484 enclosure_logical_id,
485 slot, &boot_device->EnclosureSlot);
487 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
490 rc = _scsih_srch_boot_device_name(
491 device_name, &boot_device->DeviceName);
493 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
501 * _scsih_get_sas_address - set the sas_address for given device handle
503 * @handle: device handle
504 * @sas_address: sas address
506 * Return: 0 success, non-zero when failure
509 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
512 Mpi2SasDevicePage0_t sas_device_pg0;
513 Mpi2ConfigReply_t mpi_reply;
518 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
519 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
520 ioc_err(ioc, "failure at %s:%d/%s()!\n",
521 __FILE__, __LINE__, __func__);
525 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
526 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
527 /* For HBA, vSES doesn't return HBA SAS address. Instead return
528 * vSES's sas address.
530 if ((handle <= ioc->sas_hba.num_phys) &&
531 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
532 MPI2_SAS_DEVICE_INFO_SEP)))
533 *sas_address = ioc->sas_hba.sas_address;
535 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
539 /* we hit this because the given parent handle doesn't exist */
540 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
543 /* else error case */
544 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
545 handle, ioc_status, __FILE__, __LINE__, __func__);
550 * _scsih_determine_boot_device - determine boot device.
551 * @ioc: per adapter object
552 * @device: sas_device or pcie_device object
553 * @channel: SAS or PCIe channel
555 * Determines whether this device should be first reported device to
556 * to scsi-ml or sas transport, this purpose is for persistent boot device.
557 * There are primary, alternate, and current entries in bios page 2. The order
558 * priority is primary, alternate, then current. This routine saves
559 * the corresponding device object.
560 * The saved data to be used later in _scsih_probe_boot_devices().
563 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
566 struct _sas_device *sas_device;
567 struct _pcie_device *pcie_device;
568 struct _raid_device *raid_device;
571 u64 enclosure_logical_id;
574 /* only process this function when driver loads */
575 if (!ioc->is_driver_loading)
578 /* no Bios, return immediately */
579 if (!ioc->bios_pg3.BiosVersion)
582 if (channel == RAID_CHANNEL) {
583 raid_device = device;
584 sas_address = raid_device->wwid;
586 enclosure_logical_id = 0;
588 } else if (channel == PCIE_CHANNEL) {
589 pcie_device = device;
590 sas_address = pcie_device->wwid;
592 enclosure_logical_id = 0;
596 sas_address = sas_device->sas_address;
597 device_name = sas_device->device_name;
598 enclosure_logical_id = sas_device->enclosure_logical_id;
599 slot = sas_device->slot;
602 if (!ioc->req_boot_device.device) {
603 if (_scsih_is_boot_device(sas_address, device_name,
604 enclosure_logical_id, slot,
605 (ioc->bios_pg2.ReqBootDeviceForm &
606 MPI2_BIOSPAGE2_FORM_MASK),
607 &ioc->bios_pg2.RequestedBootDevice)) {
609 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
610 __func__, (u64)sas_address));
611 ioc->req_boot_device.device = device;
612 ioc->req_boot_device.channel = channel;
616 if (!ioc->req_alt_boot_device.device) {
617 if (_scsih_is_boot_device(sas_address, device_name,
618 enclosure_logical_id, slot,
619 (ioc->bios_pg2.ReqAltBootDeviceForm &
620 MPI2_BIOSPAGE2_FORM_MASK),
621 &ioc->bios_pg2.RequestedAltBootDevice)) {
623 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
624 __func__, (u64)sas_address));
625 ioc->req_alt_boot_device.device = device;
626 ioc->req_alt_boot_device.channel = channel;
630 if (!ioc->current_boot_device.device) {
631 if (_scsih_is_boot_device(sas_address, device_name,
632 enclosure_logical_id, slot,
633 (ioc->bios_pg2.CurrentBootDeviceForm &
634 MPI2_BIOSPAGE2_FORM_MASK),
635 &ioc->bios_pg2.CurrentBootDevice)) {
637 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
638 __func__, (u64)sas_address));
639 ioc->current_boot_device.device = device;
640 ioc->current_boot_device.channel = channel;
645 static struct _sas_device *
646 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
647 struct MPT3SAS_TARGET *tgt_priv)
649 struct _sas_device *ret;
651 assert_spin_locked(&ioc->sas_device_lock);
653 ret = tgt_priv->sas_dev;
660 static struct _sas_device *
661 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
662 struct MPT3SAS_TARGET *tgt_priv)
664 struct _sas_device *ret;
667 spin_lock_irqsave(&ioc->sas_device_lock, flags);
668 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
669 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
674 static struct _pcie_device *
675 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
676 struct MPT3SAS_TARGET *tgt_priv)
678 struct _pcie_device *ret;
680 assert_spin_locked(&ioc->pcie_device_lock);
682 ret = tgt_priv->pcie_dev;
684 pcie_device_get(ret);
690 * mpt3sas_get_pdev_from_target - pcie device search
691 * @ioc: per adapter object
692 * @tgt_priv: starget private object
694 * Context: This function will acquire ioc->pcie_device_lock and will release
695 * before returning the pcie_device object.
697 * This searches for pcie_device from target, then return pcie_device object.
699 static struct _pcie_device *
700 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
701 struct MPT3SAS_TARGET *tgt_priv)
703 struct _pcie_device *ret;
706 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
707 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
708 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
715 * __mpt3sas_get_sdev_by_rphy - sas device search
716 * @ioc: per adapter object
717 * @rphy: sas_rphy pointer
719 * Context: This function will acquire ioc->sas_device_lock and will release
720 * before returning the sas_device object.
722 * This searches for sas_device from rphy object
723 * then return sas_device object.
726 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
727 struct sas_rphy *rphy)
729 struct _sas_device *sas_device;
731 assert_spin_locked(&ioc->sas_device_lock);
733 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
734 if (sas_device->rphy != rphy)
736 sas_device_get(sas_device);
741 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
742 if (sas_device->rphy != rphy)
744 sas_device_get(sas_device);
752 * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
753 * sas address from sas_device_list list
754 * @ioc: per adapter object
755 * @sas_address: device sas address
758 * Search for _sas_device object corresponding to provided sas address,
759 * if available return _sas_device object address otherwise return NULL.
762 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
763 u64 sas_address, struct hba_port *port)
765 struct _sas_device *sas_device;
770 assert_spin_locked(&ioc->sas_device_lock);
772 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
773 if (sas_device->sas_address != sas_address)
775 if (sas_device->port != port)
777 sas_device_get(sas_device);
781 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
782 if (sas_device->sas_address != sas_address)
784 if (sas_device->port != port)
786 sas_device_get(sas_device);
794 * mpt3sas_get_sdev_by_addr - sas device search
795 * @ioc: per adapter object
796 * @sas_address: sas address
797 * @port: hba port entry
798 * Context: Calling function should acquire ioc->sas_device_lock
800 * This searches for sas_device based on sas_address & port number,
801 * then return sas_device object.
804 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
805 u64 sas_address, struct hba_port *port)
807 struct _sas_device *sas_device;
810 spin_lock_irqsave(&ioc->sas_device_lock, flags);
811 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
813 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
818 static struct _sas_device *
819 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
821 struct _sas_device *sas_device;
823 assert_spin_locked(&ioc->sas_device_lock);
825 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
826 if (sas_device->handle == handle)
829 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
830 if (sas_device->handle == handle)
836 sas_device_get(sas_device);
841 * mpt3sas_get_sdev_by_handle - sas device search
842 * @ioc: per adapter object
843 * @handle: sas device handle (assigned by firmware)
844 * Context: Calling function should acquire ioc->sas_device_lock
846 * This searches for sas_device based on sas_address, then return sas_device
850 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
852 struct _sas_device *sas_device;
855 spin_lock_irqsave(&ioc->sas_device_lock, flags);
856 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
857 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
863 * _scsih_display_enclosure_chassis_info - display device location info
864 * @ioc: per adapter object
865 * @sas_device: per sas device object
866 * @sdev: scsi device struct
867 * @starget: scsi target struct
870 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
871 struct _sas_device *sas_device, struct scsi_device *sdev,
872 struct scsi_target *starget)
875 if (sas_device->enclosure_handle != 0)
876 sdev_printk(KERN_INFO, sdev,
877 "enclosure logical id (0x%016llx), slot(%d) \n",
879 sas_device->enclosure_logical_id,
881 if (sas_device->connector_name[0] != '\0')
882 sdev_printk(KERN_INFO, sdev,
883 "enclosure level(0x%04x), connector name( %s)\n",
884 sas_device->enclosure_level,
885 sas_device->connector_name);
886 if (sas_device->is_chassis_slot_valid)
887 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
888 sas_device->chassis_slot);
889 } else if (starget) {
890 if (sas_device->enclosure_handle != 0)
891 starget_printk(KERN_INFO, starget,
892 "enclosure logical id(0x%016llx), slot(%d) \n",
894 sas_device->enclosure_logical_id,
896 if (sas_device->connector_name[0] != '\0')
897 starget_printk(KERN_INFO, starget,
898 "enclosure level(0x%04x), connector name( %s)\n",
899 sas_device->enclosure_level,
900 sas_device->connector_name);
901 if (sas_device->is_chassis_slot_valid)
902 starget_printk(KERN_INFO, starget,
903 "chassis slot(0x%04x)\n",
904 sas_device->chassis_slot);
906 if (sas_device->enclosure_handle != 0)
907 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
908 (u64)sas_device->enclosure_logical_id,
910 if (sas_device->connector_name[0] != '\0')
911 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
912 sas_device->enclosure_level,
913 sas_device->connector_name);
914 if (sas_device->is_chassis_slot_valid)
915 ioc_info(ioc, "chassis slot(0x%04x)\n",
916 sas_device->chassis_slot);
921 * _scsih_sas_device_remove - remove sas_device from list.
922 * @ioc: per adapter object
923 * @sas_device: the sas_device object
924 * Context: This function will acquire ioc->sas_device_lock.
926 * If sas_device is on the list, remove it and decrement its reference count.
929 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
930 struct _sas_device *sas_device)
936 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
937 sas_device->handle, (u64)sas_device->sas_address);
939 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
942 * The lock serializes access to the list, but we still need to verify
943 * that nobody removed the entry while we were waiting on the lock.
945 spin_lock_irqsave(&ioc->sas_device_lock, flags);
946 if (!list_empty(&sas_device->list)) {
947 list_del_init(&sas_device->list);
948 sas_device_put(sas_device);
950 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
954 * _scsih_device_remove_by_handle - removing device object by handle
955 * @ioc: per adapter object
956 * @handle: device handle
959 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
961 struct _sas_device *sas_device;
964 if (ioc->shost_recovery)
967 spin_lock_irqsave(&ioc->sas_device_lock, flags);
968 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
970 list_del_init(&sas_device->list);
971 sas_device_put(sas_device);
973 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
975 _scsih_remove_device(ioc, sas_device);
976 sas_device_put(sas_device);
981 * mpt3sas_device_remove_by_sas_address - removing device object by
982 * sas address & port number
983 * @ioc: per adapter object
984 * @sas_address: device sas_address
985 * @port: hba port entry
990 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
991 u64 sas_address, struct hba_port *port)
993 struct _sas_device *sas_device;
996 if (ioc->shost_recovery)
999 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1000 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
1002 list_del_init(&sas_device->list);
1003 sas_device_put(sas_device);
1005 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1007 _scsih_remove_device(ioc, sas_device);
1008 sas_device_put(sas_device);
1013 * _scsih_sas_device_add - insert sas_device to the list.
1014 * @ioc: per adapter object
1015 * @sas_device: the sas_device object
1016 * Context: This function will acquire ioc->sas_device_lock.
1018 * Adding new object to the ioc->sas_device_list.
1021 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
1022 struct _sas_device *sas_device)
1024 unsigned long flags;
1027 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1028 __func__, sas_device->handle,
1029 (u64)sas_device->sas_address));
1031 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1034 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1035 sas_device_get(sas_device);
1036 list_add_tail(&sas_device->list, &ioc->sas_device_list);
1037 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1039 if (ioc->hide_drives) {
1040 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1044 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
1045 sas_device->sas_address_parent, sas_device->port)) {
1046 _scsih_sas_device_remove(ioc, sas_device);
1047 } else if (!sas_device->starget) {
1049 * When asyn scanning is enabled, its not possible to remove
1050 * devices while scanning is turned on due to an oops in
1051 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
1053 if (!ioc->is_driver_loading) {
1054 mpt3sas_transport_port_remove(ioc,
1055 sas_device->sas_address,
1056 sas_device->sas_address_parent,
1058 _scsih_sas_device_remove(ioc, sas_device);
1061 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1065 * _scsih_sas_device_init_add - insert sas_device to the list.
1066 * @ioc: per adapter object
1067 * @sas_device: the sas_device object
1068 * Context: This function will acquire ioc->sas_device_lock.
1070 * Adding new object at driver load time to the ioc->sas_device_init_list.
1073 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1074 struct _sas_device *sas_device)
1076 unsigned long flags;
1079 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1080 __func__, sas_device->handle,
1081 (u64)sas_device->sas_address));
1083 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1086 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1087 sas_device_get(sas_device);
1088 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
1089 _scsih_determine_boot_device(ioc, sas_device, 0);
1090 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1094 static struct _pcie_device *
1095 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1097 struct _pcie_device *pcie_device;
1099 assert_spin_locked(&ioc->pcie_device_lock);
1101 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1102 if (pcie_device->wwid == wwid)
1105 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1106 if (pcie_device->wwid == wwid)
1112 pcie_device_get(pcie_device);
1118 * mpt3sas_get_pdev_by_wwid - pcie device search
1119 * @ioc: per adapter object
1122 * Context: This function will acquire ioc->pcie_device_lock and will release
1123 * before returning the pcie_device object.
1125 * This searches for pcie_device based on wwid, then return pcie_device object.
1127 static struct _pcie_device *
1128 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1130 struct _pcie_device *pcie_device;
1131 unsigned long flags;
1133 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1134 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1135 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1141 static struct _pcie_device *
1142 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1145 struct _pcie_device *pcie_device;
1147 assert_spin_locked(&ioc->pcie_device_lock);
1149 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1150 if (pcie_device->id == id && pcie_device->channel == channel)
1153 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1154 if (pcie_device->id == id && pcie_device->channel == channel)
1160 pcie_device_get(pcie_device);
1164 static struct _pcie_device *
1165 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1167 struct _pcie_device *pcie_device;
1169 assert_spin_locked(&ioc->pcie_device_lock);
1171 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1172 if (pcie_device->handle == handle)
1175 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1176 if (pcie_device->handle == handle)
1182 pcie_device_get(pcie_device);
1188 * mpt3sas_get_pdev_by_handle - pcie device search
1189 * @ioc: per adapter object
1190 * @handle: Firmware device handle
1192 * Context: This function will acquire ioc->pcie_device_lock and will release
1193 * before returning the pcie_device object.
1195 * This searches for pcie_device based on handle, then return pcie_device
1198 struct _pcie_device *
1199 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1201 struct _pcie_device *pcie_device;
1202 unsigned long flags;
1204 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1205 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1206 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1212 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1213 * @ioc: per adapter object
1214 * Context: This function will acquire ioc->pcie_device_lock
1216 * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
1217 * which has reported maximum among all available NVMe drives.
1218 * Minimum max_shutdown_latency will be six seconds.
1221 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1223 struct _pcie_device *pcie_device;
1224 unsigned long flags;
1225 u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1227 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1228 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1229 if (pcie_device->shutdown_latency) {
1230 if (shutdown_latency < pcie_device->shutdown_latency)
1232 pcie_device->shutdown_latency;
1235 ioc->max_shutdown_latency = shutdown_latency;
1236 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1240 * _scsih_pcie_device_remove - remove pcie_device from list.
1241 * @ioc: per adapter object
1242 * @pcie_device: the pcie_device object
1243 * Context: This function will acquire ioc->pcie_device_lock.
1245 * If pcie_device is on the list, remove it and decrement its reference count.
1248 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1249 struct _pcie_device *pcie_device)
1251 unsigned long flags;
1252 int was_on_pcie_device_list = 0;
1253 u8 update_latency = 0;
1257 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1258 pcie_device->handle, (u64)pcie_device->wwid);
1259 if (pcie_device->enclosure_handle != 0)
1260 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1261 (u64)pcie_device->enclosure_logical_id,
1263 if (pcie_device->connector_name[0] != '\0')
1264 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1265 pcie_device->enclosure_level,
1266 pcie_device->connector_name);
1268 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1269 if (!list_empty(&pcie_device->list)) {
1270 list_del_init(&pcie_device->list);
1271 was_on_pcie_device_list = 1;
1273 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1275 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1276 if (was_on_pcie_device_list) {
1277 kfree(pcie_device->serial_number);
1278 pcie_device_put(pcie_device);
1282 * This device's RTD3 Entry Latency matches IOC's
1283 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1284 * from the available drives as current drive is getting removed.
1287 _scsih_set_nvme_max_shutdown_latency(ioc);
1292 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1293 * @ioc: per adapter object
1294 * @handle: device handle
1297 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1299 struct _pcie_device *pcie_device;
1300 unsigned long flags;
1301 int was_on_pcie_device_list = 0;
1302 u8 update_latency = 0;
1304 if (ioc->shost_recovery)
1307 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1308 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1310 if (!list_empty(&pcie_device->list)) {
1311 list_del_init(&pcie_device->list);
1312 was_on_pcie_device_list = 1;
1313 pcie_device_put(pcie_device);
1315 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1318 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1319 if (was_on_pcie_device_list) {
1320 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1321 pcie_device_put(pcie_device);
1325 * This device's RTD3 Entry Latency matches IOC's
1326 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1327 * from the available drives as current drive is getting removed.
1330 _scsih_set_nvme_max_shutdown_latency(ioc);
1334 * _scsih_pcie_device_add - add pcie_device object
1335 * @ioc: per adapter object
1336 * @pcie_device: pcie_device object
1338 * This is added to the pcie_device_list link list.
1341 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1342 struct _pcie_device *pcie_device)
1344 unsigned long flags;
1347 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1349 pcie_device->handle, (u64)pcie_device->wwid));
1350 if (pcie_device->enclosure_handle != 0)
1352 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1354 (u64)pcie_device->enclosure_logical_id,
1355 pcie_device->slot));
1356 if (pcie_device->connector_name[0] != '\0')
1358 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1359 __func__, pcie_device->enclosure_level,
1360 pcie_device->connector_name));
1362 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1363 pcie_device_get(pcie_device);
1364 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1365 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1367 if (pcie_device->access_status ==
1368 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1369 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1372 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1373 _scsih_pcie_device_remove(ioc, pcie_device);
1374 } else if (!pcie_device->starget) {
1375 if (!ioc->is_driver_loading) {
1376 /*TODO-- Need to find out whether this condition will occur or not*/
1377 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1380 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1384 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1385 * @ioc: per adapter object
1386 * @pcie_device: the pcie_device object
1387 * Context: This function will acquire ioc->pcie_device_lock.
1389 * Adding new object at driver load time to the ioc->pcie_device_init_list.
1392 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1393 struct _pcie_device *pcie_device)
1395 unsigned long flags;
1398 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1400 pcie_device->handle, (u64)pcie_device->wwid));
1401 if (pcie_device->enclosure_handle != 0)
1403 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1405 (u64)pcie_device->enclosure_logical_id,
1406 pcie_device->slot));
1407 if (pcie_device->connector_name[0] != '\0')
1409 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1410 __func__, pcie_device->enclosure_level,
1411 pcie_device->connector_name));
1413 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1414 pcie_device_get(pcie_device);
1415 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1416 if (pcie_device->access_status !=
1417 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1418 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1419 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1422 * _scsih_raid_device_find_by_id - raid device search
1423 * @ioc: per adapter object
1424 * @id: sas device target id
1425 * @channel: sas device channel
1426 * Context: Calling function should acquire ioc->raid_device_lock
1428 * This searches for raid_device based on target id, then return raid_device
1431 static struct _raid_device *
1432 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1434 struct _raid_device *raid_device, *r;
1437 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1438 if (raid_device->id == id && raid_device->channel == channel) {
1449 * mpt3sas_raid_device_find_by_handle - raid device search
1450 * @ioc: per adapter object
1451 * @handle: sas device handle (assigned by firmware)
1452 * Context: Calling function should acquire ioc->raid_device_lock
1454 * This searches for raid_device based on handle, then return raid_device
1457 struct _raid_device *
1458 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1460 struct _raid_device *raid_device, *r;
1463 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1464 if (raid_device->handle != handle)
1475 * _scsih_raid_device_find_by_wwid - raid device search
1476 * @ioc: per adapter object
1478 * Context: Calling function should acquire ioc->raid_device_lock
1480 * This searches for raid_device based on wwid, then return raid_device
1483 static struct _raid_device *
1484 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1486 struct _raid_device *raid_device, *r;
1489 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1490 if (raid_device->wwid != wwid)
1501 * _scsih_raid_device_add - add raid_device object
1502 * @ioc: per adapter object
1503 * @raid_device: raid_device object
1505 * This is added to the raid_device_list link list.
1508 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1509 struct _raid_device *raid_device)
1511 unsigned long flags;
1514 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1516 raid_device->handle, (u64)raid_device->wwid));
1518 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1519 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1520 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1524 * _scsih_raid_device_remove - delete raid_device object
1525 * @ioc: per adapter object
1526 * @raid_device: raid_device object
1530 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1531 struct _raid_device *raid_device)
1533 unsigned long flags;
1535 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1536 list_del(&raid_device->list);
1538 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1542 * mpt3sas_scsih_expander_find_by_handle - expander device search
1543 * @ioc: per adapter object
1544 * @handle: expander handle (assigned by firmware)
1545 * Context: Calling function should acquire ioc->sas_device_lock
1547 * This searches for expander device based on handle, then returns the
1551 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1553 struct _sas_node *sas_expander, *r;
1556 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1557 if (sas_expander->handle != handle)
1567 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search
1568 * @ioc: per adapter object
1569 * @handle: enclosure handle (assigned by firmware)
1570 * Context: Calling function should acquire ioc->sas_device_lock
1572 * This searches for enclosure device based on handle, then returns the
1575 static struct _enclosure_node *
1576 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1578 struct _enclosure_node *enclosure_dev, *r;
1581 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1582 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1591 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1592 * @ioc: per adapter object
1593 * @sas_address: sas address
1594 * @port: hba port entry
1595 * Context: Calling function should acquire ioc->sas_node_lock.
1597 * This searches for expander device based on sas_address & port number,
1598 * then returns the sas_node object.
1601 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1602 u64 sas_address, struct hba_port *port)
1604 struct _sas_node *sas_expander, *r = NULL;
1609 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1610 if (sas_expander->sas_address != sas_address)
1612 if (sas_expander->port != port)
1622 * _scsih_expander_node_add - insert expander device to the list.
1623 * @ioc: per adapter object
1624 * @sas_expander: the sas_device object
1625 * Context: This function will acquire ioc->sas_node_lock.
1627 * Adding new object to the ioc->sas_expander_list.
1630 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1631 struct _sas_node *sas_expander)
1633 unsigned long flags;
1635 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1636 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1637 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1641 * _scsih_is_end_device - determines if device is an end device
1642 * @device_info: bitfield providing information about the device.
1645 * Return: 1 if end device.
1648 _scsih_is_end_device(u32 device_info)
1650 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1651 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1652 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1653 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1660 * _scsih_is_nvme_pciescsi_device - determines if
1661 * device is an pcie nvme/scsi device
1662 * @device_info: bitfield providing information about the device.
1665 * Returns 1 if device is pcie device type nvme/scsi.
1668 _scsih_is_nvme_pciescsi_device(u32 device_info)
1670 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1671 == MPI26_PCIE_DEVINFO_NVME) ||
1672 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1673 == MPI26_PCIE_DEVINFO_SCSI))
1680 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1681 * @ioc: per adapter object
1684 * Context: This function will acquire ioc->scsi_lookup_lock.
1686 * This will search for a matching channel:id in the scsi_lookup array,
1687 * returning 1 if found.
1690 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1694 struct scsi_cmnd *scmd;
1697 smid <= ioc->shost->can_queue; smid++) {
1698 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1701 if (scmd->device->id == id &&
1702 scmd->device->channel == channel)
1709 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1710 * @ioc: per adapter object
1714 * Context: This function will acquire ioc->scsi_lookup_lock.
1716 * This will search for a matching channel:id:lun in the scsi_lookup array,
1717 * returning 1 if found.
1720 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1721 unsigned int lun, int channel)
1724 struct scsi_cmnd *scmd;
1726 for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1728 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1731 if (scmd->device->id == id &&
1732 scmd->device->channel == channel &&
1733 scmd->device->lun == lun)
1740 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1741 * @ioc: per adapter object
1742 * @smid: system request message index
1744 * Return: the smid stored scmd pointer.
1745 * Then will dereference the stored scmd pointer.
1748 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1750 struct scsi_cmnd *scmd = NULL;
1751 struct scsiio_tracker *st;
1752 Mpi25SCSIIORequest_t *mpi_request;
1756 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1758 ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
1760 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1763 * If SCSI IO request is outstanding at driver level then
1764 * DevHandle filed must be non-zero. If DevHandle is zero
1765 * then it means that this smid is free at driver level,
1768 if (!mpi_request->DevHandle)
1771 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1773 st = scsi_cmd_priv(scmd);
1774 if (st->cb_idx == 0xFF || st->smid == 0)
1782 * scsih_change_queue_depth - setting device queue depth
1783 * @sdev: scsi device struct
1784 * @qdepth: requested queue depth
1786 * Return: queue depth.
1789 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1791 struct Scsi_Host *shost = sdev->host;
1793 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1794 struct MPT3SAS_DEVICE *sas_device_priv_data;
1795 struct MPT3SAS_TARGET *sas_target_priv_data;
1796 struct _sas_device *sas_device;
1797 unsigned long flags;
1799 max_depth = shost->can_queue;
1802 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1805 if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
1808 sas_device_priv_data = sdev->hostdata;
1809 if (!sas_device_priv_data)
1811 sas_target_priv_data = sas_device_priv_data->sas_target;
1812 if (!sas_target_priv_data)
1814 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1817 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1818 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1820 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1821 max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1823 sas_device_put(sas_device);
1825 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1829 if (!sdev->tagged_supported)
1831 if (qdepth > max_depth)
1833 scsi_change_queue_depth(sdev, qdepth);
1834 sdev_printk(KERN_INFO, sdev,
1835 "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1836 sdev->queue_depth, sdev->tagged_supported,
1837 sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1838 return sdev->queue_depth;
1842 * mpt3sas_scsih_change_queue_depth - setting device queue depth
1843 * @sdev: scsi device struct
1844 * @qdepth: requested queue depth
1849 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1851 struct Scsi_Host *shost = sdev->host;
1852 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1854 if (ioc->enable_sdev_max_qd)
1855 qdepth = shost->can_queue;
1857 scsih_change_queue_depth(sdev, qdepth);
1861 * scsih_target_alloc - target add routine
1862 * @starget: scsi target struct
1864 * Return: 0 if ok. Any other return is assumed to be an error and
1865 * the device is ignored.
1868 scsih_target_alloc(struct scsi_target *starget)
1870 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1871 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1872 struct MPT3SAS_TARGET *sas_target_priv_data;
1873 struct _sas_device *sas_device;
1874 struct _raid_device *raid_device;
1875 struct _pcie_device *pcie_device;
1876 unsigned long flags;
1877 struct sas_rphy *rphy;
1879 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1881 if (!sas_target_priv_data)
1884 starget->hostdata = sas_target_priv_data;
1885 sas_target_priv_data->starget = starget;
1886 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1889 if (starget->channel == RAID_CHANNEL) {
1890 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1891 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1894 sas_target_priv_data->handle = raid_device->handle;
1895 sas_target_priv_data->sas_address = raid_device->wwid;
1896 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1897 if (ioc->is_warpdrive)
1898 sas_target_priv_data->raid_device = raid_device;
1899 raid_device->starget = starget;
1901 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1906 if (starget->channel == PCIE_CHANNEL) {
1907 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1908 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1911 sas_target_priv_data->handle = pcie_device->handle;
1912 sas_target_priv_data->sas_address = pcie_device->wwid;
1913 sas_target_priv_data->port = NULL;
1914 sas_target_priv_data->pcie_dev = pcie_device;
1915 pcie_device->starget = starget;
1916 pcie_device->id = starget->id;
1917 pcie_device->channel = starget->channel;
1918 sas_target_priv_data->flags |=
1919 MPT_TARGET_FLAGS_PCIE_DEVICE;
1920 if (pcie_device->fast_path)
1921 sas_target_priv_data->flags |=
1922 MPT_TARGET_FASTPATH_IO;
1924 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1928 /* sas/sata devices */
1929 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1930 rphy = dev_to_rphy(starget->dev.parent);
1931 sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
1934 sas_target_priv_data->handle = sas_device->handle;
1935 sas_target_priv_data->sas_address = sas_device->sas_address;
1936 sas_target_priv_data->port = sas_device->port;
1937 sas_target_priv_data->sas_dev = sas_device;
1938 sas_device->starget = starget;
1939 sas_device->id = starget->id;
1940 sas_device->channel = starget->channel;
1941 if (test_bit(sas_device->handle, ioc->pd_handles))
1942 sas_target_priv_data->flags |=
1943 MPT_TARGET_FLAGS_RAID_COMPONENT;
1944 if (sas_device->fast_path)
1945 sas_target_priv_data->flags |=
1946 MPT_TARGET_FASTPATH_IO;
1948 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1954 * scsih_target_destroy - target destroy routine
1955 * @starget: scsi target struct
1958 scsih_target_destroy(struct scsi_target *starget)
1960 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1961 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1962 struct MPT3SAS_TARGET *sas_target_priv_data;
1963 struct _sas_device *sas_device;
1964 struct _raid_device *raid_device;
1965 struct _pcie_device *pcie_device;
1966 unsigned long flags;
1968 sas_target_priv_data = starget->hostdata;
1969 if (!sas_target_priv_data)
1972 if (starget->channel == RAID_CHANNEL) {
1973 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1974 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1977 raid_device->starget = NULL;
1978 raid_device->sdev = NULL;
1980 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1984 if (starget->channel == PCIE_CHANNEL) {
1985 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1986 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1987 sas_target_priv_data);
1988 if (pcie_device && (pcie_device->starget == starget) &&
1989 (pcie_device->id == starget->id) &&
1990 (pcie_device->channel == starget->channel))
1991 pcie_device->starget = NULL;
1995 * Corresponding get() is in _scsih_target_alloc()
1997 sas_target_priv_data->pcie_dev = NULL;
1998 pcie_device_put(pcie_device);
1999 pcie_device_put(pcie_device);
2001 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2005 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2006 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
2007 if (sas_device && (sas_device->starget == starget) &&
2008 (sas_device->id == starget->id) &&
2009 (sas_device->channel == starget->channel))
2010 sas_device->starget = NULL;
2014 * Corresponding get() is in _scsih_target_alloc()
2016 sas_target_priv_data->sas_dev = NULL;
2017 sas_device_put(sas_device);
2019 sas_device_put(sas_device);
2021 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2024 kfree(sas_target_priv_data);
2025 starget->hostdata = NULL;
2029 * scsih_slave_alloc - device add routine
2030 * @sdev: scsi device struct
2032 * Return: 0 if ok. Any other return is assumed to be an error and
2033 * the device is ignored.
2036 scsih_slave_alloc(struct scsi_device *sdev)
2038 struct Scsi_Host *shost;
2039 struct MPT3SAS_ADAPTER *ioc;
2040 struct MPT3SAS_TARGET *sas_target_priv_data;
2041 struct MPT3SAS_DEVICE *sas_device_priv_data;
2042 struct scsi_target *starget;
2043 struct _raid_device *raid_device;
2044 struct _sas_device *sas_device;
2045 struct _pcie_device *pcie_device;
2046 unsigned long flags;
2048 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
2050 if (!sas_device_priv_data)
2053 sas_device_priv_data->lun = sdev->lun;
2054 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
2056 starget = scsi_target(sdev);
2057 sas_target_priv_data = starget->hostdata;
2058 sas_target_priv_data->num_luns++;
2059 sas_device_priv_data->sas_target = sas_target_priv_data;
2060 sdev->hostdata = sas_device_priv_data;
2061 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
2062 sdev->no_uld_attach = 1;
2064 shost = dev_to_shost(&starget->dev);
2065 ioc = shost_priv(shost);
2066 if (starget->channel == RAID_CHANNEL) {
2067 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2068 raid_device = _scsih_raid_device_find_by_id(ioc,
2069 starget->id, starget->channel);
2071 raid_device->sdev = sdev; /* raid is single lun */
2072 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2074 if (starget->channel == PCIE_CHANNEL) {
2075 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2076 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2077 sas_target_priv_data->sas_address);
2078 if (pcie_device && (pcie_device->starget == NULL)) {
2079 sdev_printk(KERN_INFO, sdev,
2080 "%s : pcie_device->starget set to starget @ %d\n",
2081 __func__, __LINE__);
2082 pcie_device->starget = starget;
2086 pcie_device_put(pcie_device);
2087 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2089 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2090 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2091 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2092 sas_target_priv_data->sas_address,
2093 sas_target_priv_data->port);
2094 if (sas_device && (sas_device->starget == NULL)) {
2095 sdev_printk(KERN_INFO, sdev,
2096 "%s : sas_device->starget set to starget @ %d\n",
2097 __func__, __LINE__);
2098 sas_device->starget = starget;
2102 sas_device_put(sas_device);
2104 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2111 * scsih_slave_destroy - device destroy routine
2112 * @sdev: scsi device struct
2115 scsih_slave_destroy(struct scsi_device *sdev)
2117 struct MPT3SAS_TARGET *sas_target_priv_data;
2118 struct scsi_target *starget;
2119 struct Scsi_Host *shost;
2120 struct MPT3SAS_ADAPTER *ioc;
2121 struct _sas_device *sas_device;
2122 struct _pcie_device *pcie_device;
2123 unsigned long flags;
2125 if (!sdev->hostdata)
2128 starget = scsi_target(sdev);
2129 sas_target_priv_data = starget->hostdata;
2130 sas_target_priv_data->num_luns--;
2132 shost = dev_to_shost(&starget->dev);
2133 ioc = shost_priv(shost);
2135 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2136 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2137 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2138 sas_target_priv_data);
2139 if (pcie_device && !sas_target_priv_data->num_luns)
2140 pcie_device->starget = NULL;
2143 pcie_device_put(pcie_device);
2145 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2147 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2148 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2149 sas_device = __mpt3sas_get_sdev_from_target(ioc,
2150 sas_target_priv_data);
2151 if (sas_device && !sas_target_priv_data->num_luns)
2152 sas_device->starget = NULL;
2155 sas_device_put(sas_device);
2156 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2159 kfree(sdev->hostdata);
2160 sdev->hostdata = NULL;
2164 * _scsih_display_sata_capabilities - sata capabilities
2165 * @ioc: per adapter object
2166 * @handle: device handle
2167 * @sdev: scsi device struct
2170 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2171 u16 handle, struct scsi_device *sdev)
2173 Mpi2ConfigReply_t mpi_reply;
2174 Mpi2SasDevicePage0_t sas_device_pg0;
2179 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2180 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2181 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2182 __FILE__, __LINE__, __func__);
2186 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2187 MPI2_IOCSTATUS_MASK;
2188 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2189 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2190 __FILE__, __LINE__, __func__);
2194 flags = le16_to_cpu(sas_device_pg0.Flags);
2195 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2197 sdev_printk(KERN_INFO, sdev,
2198 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2199 "sw_preserve(%s)\n",
2200 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2201 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2202 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2204 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2206 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2210 * raid transport support -
2211 * Enabled for SLES11 and newer, in older kernels the driver will panic when
2212 * unloading the driver followed by a load - I believe that the subroutine
2213 * raid_class_release() is not cleaning up properly.
2217 * scsih_is_raid - return boolean indicating device is raid volume
2218 * @dev: the device struct object
2221 scsih_is_raid(struct device *dev)
2223 struct scsi_device *sdev = to_scsi_device(dev);
2224 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2226 if (ioc->is_warpdrive)
2228 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2232 scsih_is_nvme(struct device *dev)
2234 struct scsi_device *sdev = to_scsi_device(dev);
2236 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2240 * scsih_get_resync - get raid volume resync percent complete
2241 * @dev: the device struct object
2244 scsih_get_resync(struct device *dev)
2246 struct scsi_device *sdev = to_scsi_device(dev);
2247 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2248 static struct _raid_device *raid_device;
2249 unsigned long flags;
2250 Mpi2RaidVolPage0_t vol_pg0;
2251 Mpi2ConfigReply_t mpi_reply;
2252 u32 volume_status_flags;
2253 u8 percent_complete;
2256 percent_complete = 0;
2258 if (ioc->is_warpdrive)
2261 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2262 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2265 handle = raid_device->handle;
2266 percent_complete = raid_device->percent_complete;
2268 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2273 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2274 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2275 sizeof(Mpi2RaidVolPage0_t))) {
2276 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2277 __FILE__, __LINE__, __func__);
2278 percent_complete = 0;
2282 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2283 if (!(volume_status_flags &
2284 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2285 percent_complete = 0;
2289 switch (ioc->hba_mpi_version_belonged) {
2291 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2295 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2301 * scsih_get_state - get raid volume level
2302 * @dev: the device struct object
2305 scsih_get_state(struct device *dev)
2307 struct scsi_device *sdev = to_scsi_device(dev);
2308 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2309 static struct _raid_device *raid_device;
2310 unsigned long flags;
2311 Mpi2RaidVolPage0_t vol_pg0;
2312 Mpi2ConfigReply_t mpi_reply;
2314 enum raid_state state = RAID_STATE_UNKNOWN;
2317 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2318 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2321 handle = raid_device->handle;
2322 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2327 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2328 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2329 sizeof(Mpi2RaidVolPage0_t))) {
2330 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2331 __FILE__, __LINE__, __func__);
2335 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2336 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2337 state = RAID_STATE_RESYNCING;
2341 switch (vol_pg0.VolumeState) {
2342 case MPI2_RAID_VOL_STATE_OPTIMAL:
2343 case MPI2_RAID_VOL_STATE_ONLINE:
2344 state = RAID_STATE_ACTIVE;
2346 case MPI2_RAID_VOL_STATE_DEGRADED:
2347 state = RAID_STATE_DEGRADED;
2349 case MPI2_RAID_VOL_STATE_FAILED:
2350 case MPI2_RAID_VOL_STATE_MISSING:
2351 state = RAID_STATE_OFFLINE;
2355 switch (ioc->hba_mpi_version_belonged) {
2357 raid_set_state(mpt2sas_raid_template, dev, state);
2361 raid_set_state(mpt3sas_raid_template, dev, state);
2367 * _scsih_set_level - set raid level
2369 * @sdev: scsi device struct
2370 * @volume_type: volume type
2373 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2374 struct scsi_device *sdev, u8 volume_type)
2376 enum raid_level level = RAID_LEVEL_UNKNOWN;
2378 switch (volume_type) {
2379 case MPI2_RAID_VOL_TYPE_RAID0:
2380 level = RAID_LEVEL_0;
2382 case MPI2_RAID_VOL_TYPE_RAID10:
2383 level = RAID_LEVEL_10;
2385 case MPI2_RAID_VOL_TYPE_RAID1E:
2386 level = RAID_LEVEL_1E;
2388 case MPI2_RAID_VOL_TYPE_RAID1:
2389 level = RAID_LEVEL_1;
2393 switch (ioc->hba_mpi_version_belonged) {
2395 raid_set_level(mpt2sas_raid_template,
2396 &sdev->sdev_gendev, level);
2400 raid_set_level(mpt3sas_raid_template,
2401 &sdev->sdev_gendev, level);
2408 * _scsih_get_volume_capabilities - volume capabilities
2409 * @ioc: per adapter object
2410 * @raid_device: the raid_device object
2412 * Return: 0 for success, else 1
2415 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2416 struct _raid_device *raid_device)
2418 Mpi2RaidVolPage0_t *vol_pg0;
2419 Mpi2RaidPhysDiskPage0_t pd_pg0;
2420 Mpi2SasDevicePage0_t sas_device_pg0;
2421 Mpi2ConfigReply_t mpi_reply;
2425 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2426 &num_pds)) || !num_pds) {
2428 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2429 __FILE__, __LINE__, __func__));
2433 raid_device->num_pds = num_pds;
2434 sz = struct_size(vol_pg0, PhysDisk, num_pds);
2435 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2438 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2439 __FILE__, __LINE__, __func__));
2443 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2444 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2446 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2447 __FILE__, __LINE__, __func__));
2452 raid_device->volume_type = vol_pg0->VolumeType;
2454 /* figure out what the underlying devices are by
2455 * obtaining the device_info bits for the 1st device
2457 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2458 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2459 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2460 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2461 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2462 le16_to_cpu(pd_pg0.DevHandle)))) {
2463 raid_device->device_info =
2464 le32_to_cpu(sas_device_pg0.DeviceInfo);
2473 * _scsih_enable_tlr - setting TLR flags
2474 * @ioc: per adapter object
2475 * @sdev: scsi device struct
2477 * Enabling Transaction Layer Retries for tape devices when
2478 * vpd page 0x90 is present
2482 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2486 if (sdev->type != TYPE_TAPE)
2489 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2492 sas_enable_tlr(sdev);
2493 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2494 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2500 * scsih_slave_configure - device configure routine.
2501 * @sdev: scsi device struct
2503 * Return: 0 if ok. Any other return is assumed to be an error and
2504 * the device is ignored.
2507 scsih_slave_configure(struct scsi_device *sdev)
2509 struct Scsi_Host *shost = sdev->host;
2510 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2511 struct MPT3SAS_DEVICE *sas_device_priv_data;
2512 struct MPT3SAS_TARGET *sas_target_priv_data;
2513 struct _sas_device *sas_device;
2514 struct _pcie_device *pcie_device;
2515 struct _raid_device *raid_device;
2516 unsigned long flags;
2521 u16 handle, volume_handle = 0;
2522 u64 volume_wwid = 0;
2525 sas_device_priv_data = sdev->hostdata;
2526 sas_device_priv_data->configured_lun = 1;
2527 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2528 sas_target_priv_data = sas_device_priv_data->sas_target;
2529 handle = sas_target_priv_data->handle;
2531 /* raid volume handling */
2532 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2534 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2535 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2536 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2539 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2540 __FILE__, __LINE__, __func__));
2544 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2546 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2547 __FILE__, __LINE__, __func__));
2552 * WARPDRIVE: Initialize the required data for Direct IO
2554 mpt3sas_init_warpdrive_properties(ioc, raid_device);
2556 /* RAID Queue Depth Support
2557 * IS volume = underlying qdepth of drive type, either
2558 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2559 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2561 if (raid_device->device_info &
2562 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2563 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2566 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2567 if (raid_device->device_info &
2568 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2574 switch (raid_device->volume_type) {
2575 case MPI2_RAID_VOL_TYPE_RAID0:
2578 case MPI2_RAID_VOL_TYPE_RAID1E:
2579 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2580 if (ioc->manu_pg10.OEMIdentifier &&
2581 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2582 MFG10_GF0_R10_DISPLAY) &&
2583 !(raid_device->num_pds % 2))
2588 case MPI2_RAID_VOL_TYPE_RAID1:
2589 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2592 case MPI2_RAID_VOL_TYPE_RAID10:
2593 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2596 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2598 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2603 if (!ioc->hide_ir_msg)
2604 sdev_printk(KERN_INFO, sdev,
2605 "%s: handle(0x%04x), wwid(0x%016llx),"
2606 " pd_count(%d), type(%s)\n",
2607 r_level, raid_device->handle,
2608 (unsigned long long)raid_device->wwid,
2609 raid_device->num_pds, ds);
2611 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2612 blk_queue_max_hw_sectors(sdev->request_queue,
2613 MPT3SAS_RAID_MAX_SECTORS);
2614 sdev_printk(KERN_INFO, sdev,
2615 "Set queue's max_sector to: %u\n",
2616 MPT3SAS_RAID_MAX_SECTORS);
2619 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2621 /* raid transport support */
2622 if (!ioc->is_warpdrive)
2623 _scsih_set_level(ioc, sdev, raid_device->volume_type);
2627 /* non-raid handling */
2628 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2629 if (mpt3sas_config_get_volume_handle(ioc, handle,
2632 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2633 __FILE__, __LINE__, __func__));
2636 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2637 volume_handle, &volume_wwid)) {
2639 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2640 __FILE__, __LINE__, __func__));
2646 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2647 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2648 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2649 sas_device_priv_data->sas_target->sas_address);
2651 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2653 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2654 __FILE__, __LINE__, __func__));
2658 qdepth = ioc->max_nvme_qd;
2660 sdev_printk(KERN_INFO, sdev,
2661 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2662 ds, handle, (unsigned long long)pcie_device->wwid,
2663 pcie_device->port_num);
2664 if (pcie_device->enclosure_handle != 0)
2665 sdev_printk(KERN_INFO, sdev,
2666 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2668 (unsigned long long)pcie_device->enclosure_logical_id,
2670 if (pcie_device->connector_name[0] != '\0')
2671 sdev_printk(KERN_INFO, sdev,
2672 "%s: enclosure level(0x%04x),"
2673 "connector name( %s)\n", ds,
2674 pcie_device->enclosure_level,
2675 pcie_device->connector_name);
2677 if (pcie_device->nvme_mdts)
2678 blk_queue_max_hw_sectors(sdev->request_queue,
2679 pcie_device->nvme_mdts/512);
2681 pcie_device_put(pcie_device);
2682 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2683 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2684 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
2685 ** merged and can eliminate holes created during merging
2688 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2689 sdev->request_queue);
2690 blk_queue_virt_boundary(sdev->request_queue,
2691 ioc->page_size - 1);
2695 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2696 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2697 sas_device_priv_data->sas_target->sas_address,
2698 sas_device_priv_data->sas_target->port);
2700 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2702 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2703 __FILE__, __LINE__, __func__));
2707 sas_device->volume_handle = volume_handle;
2708 sas_device->volume_wwid = volume_wwid;
2709 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2710 qdepth = (sas_device->port_type > 1) ?
2711 ioc->max_wideport_qd : ioc->max_narrowport_qd;
2713 if (sas_device->device_info &
2714 MPI2_SAS_DEVICE_INFO_SEP) {
2715 sdev_printk(KERN_WARNING, sdev,
2716 "set ignore_delay_remove for handle(0x%04x)\n",
2717 sas_device_priv_data->sas_target->handle);
2718 sas_device_priv_data->ignore_delay_remove = 1;
2723 qdepth = ioc->max_sata_qd;
2724 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2726 else if (sas_device->device_info &
2727 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2731 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2732 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2733 ds, handle, (unsigned long long)sas_device->sas_address,
2734 sas_device->phy, (unsigned long long)sas_device->device_name);
2736 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2738 sas_device_put(sas_device);
2739 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2742 _scsih_display_sata_capabilities(ioc, handle, sdev);
2745 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2748 sas_read_port_mode_page(sdev);
2749 _scsih_enable_tlr(ioc, sdev);
2756 * scsih_bios_param - fetch head, sector, cylinder info for a disk
2757 * @sdev: scsi device struct
2758 * @bdev: pointer to block device context
2759 * @capacity: device size (in 512 byte sectors)
2760 * @params: three element array to place output:
2761 * params[0] number of heads (max 255)
2762 * params[1] number of sectors (max 63)
2763 * params[2] number of cylinders
2766 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2767 sector_t capacity, int params[])
2777 dummy = heads * sectors;
2778 cylinders = capacity;
2779 sector_div(cylinders, dummy);
2782 * Handle extended translation size for logical drives
2785 if ((ulong)capacity >= 0x200000) {
2788 dummy = heads * sectors;
2789 cylinders = capacity;
2790 sector_div(cylinders, dummy);
2795 params[1] = sectors;
2796 params[2] = cylinders;
2802 * _scsih_response_code - translation of device response code
2803 * @ioc: per adapter object
2804 * @response_code: response code returned by the device
2807 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2811 switch (response_code) {
2812 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2813 desc = "task management request completed";
2815 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2816 desc = "invalid frame";
2818 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2819 desc = "task management request not supported";
2821 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2822 desc = "task management request failed";
2824 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2825 desc = "task management request succeeded";
2827 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2828 desc = "invalid lun";
2831 desc = "overlapped tag attempted";
2833 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2834 desc = "task queued, however not sent to target";
2840 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2844 * _scsih_tm_done - tm completion routine
2845 * @ioc: per adapter object
2846 * @smid: system request message index
2847 * @msix_index: MSIX table index supplied by the OS
2848 * @reply: reply message frame(lower 32bit addr)
2851 * The callback handler when using scsih_issue_tm.
2853 * Return: 1 meaning mf should be freed from _base_interrupt
2854 * 0 means the mf is freed from this function.
2857 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2859 MPI2DefaultReply_t *mpi_reply;
2861 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2863 if (ioc->tm_cmds.smid != smid)
2865 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2866 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
2868 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2869 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2871 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2872 complete(&ioc->tm_cmds.done);
2877 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2878 * @ioc: per adapter object
2879 * @handle: device handle
2881 * During taskmangement request, we need to freeze the device queue.
2884 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2886 struct MPT3SAS_DEVICE *sas_device_priv_data;
2887 struct scsi_device *sdev;
2890 shost_for_each_device(sdev, ioc->shost) {
2893 sas_device_priv_data = sdev->hostdata;
2894 if (!sas_device_priv_data)
2896 if (sas_device_priv_data->sas_target->handle == handle) {
2897 sas_device_priv_data->sas_target->tm_busy = 1;
2899 ioc->ignore_loginfos = 1;
2905 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2906 * @ioc: per adapter object
2907 * @handle: device handle
2909 * During taskmangement request, we need to freeze the device queue.
2912 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2914 struct MPT3SAS_DEVICE *sas_device_priv_data;
2915 struct scsi_device *sdev;
2918 shost_for_each_device(sdev, ioc->shost) {
2921 sas_device_priv_data = sdev->hostdata;
2922 if (!sas_device_priv_data)
2924 if (sas_device_priv_data->sas_target->handle == handle) {
2925 sas_device_priv_data->sas_target->tm_busy = 0;
2927 ioc->ignore_loginfos = 0;
2933 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2934 * @ioc: per adapter object
2935 * @channel: the channel assigned by the OS
2936 * @id: the id assigned by the OS
2938 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2939 * @smid_task: smid assigned to the task
2941 * Look whether TM has aborted the timed out SCSI command, if
2942 * TM has aborted the IO then return SUCCESS else return FAILED.
2945 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2946 uint id, uint lun, u8 type, u16 smid_task)
2949 if (smid_task <= ioc->shost->can_queue) {
2951 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2952 if (!(_scsih_scsi_lookup_find_by_target(ioc,
2956 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2957 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2958 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2965 } else if (smid_task == ioc->scsih_cmds.smid) {
2966 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2967 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2969 } else if (smid_task == ioc->ctl_cmds.smid) {
2970 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2971 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2979 * scsih_tm_post_processing - post processing of target & LUN reset
2980 * @ioc: per adapter object
2981 * @handle: device handle
2982 * @channel: the channel assigned by the OS
2983 * @id: the id assigned by the OS
2985 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2986 * @smid_task: smid assigned to the task
2988 * Post processing of target & LUN reset. Due to interrupt latency
2989 * issue it possible that interrupt for aborted IO might not be
2990 * received yet. So before returning failure status, poll the
2991 * reply descriptor pools for the reply of timed out SCSI command.
2992 * Return FAILED status if reply for timed out is not received
2993 * otherwise return SUCCESS.
2996 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2997 uint channel, uint id, uint lun, u8 type, u16 smid_task)
3001 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3006 "Poll ReplyDescriptor queues for completion of"
3007 " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3008 smid_task, type, handle);
3011 * Due to interrupt latency issues, driver may receive interrupt for
3012 * TM first and then for aborted SCSI IO command. So, poll all the
3013 * ReplyDescriptor pools before returning the FAILED status to SML.
3015 mpt3sas_base_mask_interrupts(ioc);
3016 mpt3sas_base_sync_reply_irqs(ioc, 1);
3017 mpt3sas_base_unmask_interrupts(ioc);
3019 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3023 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
3024 * @ioc: per adapter struct
3025 * @handle: device handle
3026 * @channel: the channel assigned by the OS
3027 * @id: the id assigned by the OS
3029 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3030 * @smid_task: smid assigned to the task
3031 * @msix_task: MSIX table index supplied by the OS
3032 * @timeout: timeout in seconds
3033 * @tr_method: Target Reset Method
3036 * A generic API for sending task management requests to firmware.
3038 * The callback index is set inside `ioc->tm_cb_idx`.
3039 * The caller is responsible to check for outstanding commands.
3041 * Return: SUCCESS or FAILED.
3044 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
3045 uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
3046 u8 timeout, u8 tr_method)
3048 Mpi2SCSITaskManagementRequest_t *mpi_request;
3049 Mpi2SCSITaskManagementReply_t *mpi_reply;
3050 Mpi25SCSIIORequest_t *request;
3056 lockdep_assert_held(&ioc->tm_cmds.mutex);
3058 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
3059 ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
3063 if (ioc->shost_recovery || ioc->remove_host ||
3064 ioc->pci_error_recovery) {
3065 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
3069 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3070 if (ioc_state & MPI2_DOORBELL_USED) {
3071 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
3072 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3073 return (!rc) ? SUCCESS : FAILED;
3076 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3077 mpt3sas_print_fault_code(ioc, ioc_state &
3078 MPI2_DOORBELL_DATA_MASK);
3079 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3080 return (!rc) ? SUCCESS : FAILED;
3081 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3082 MPI2_IOC_STATE_COREDUMP) {
3083 mpt3sas_print_coredump_info(ioc, ioc_state &
3084 MPI2_DOORBELL_DATA_MASK);
3085 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3086 return (!rc) ? SUCCESS : FAILED;
3089 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
3091 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
3096 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
3097 handle, type, smid_task, timeout, tr_method));
3098 ioc->tm_cmds.status = MPT3_CMD_PENDING;
3099 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3100 ioc->tm_cmds.smid = smid;
3101 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3102 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
3103 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3104 mpi_request->DevHandle = cpu_to_le16(handle);
3105 mpi_request->TaskType = type;
3106 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
3107 type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3108 mpi_request->MsgFlags = tr_method;
3109 mpi_request->TaskMID = cpu_to_le16(smid_task);
3110 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
3111 mpt3sas_scsih_set_tm_flag(ioc, handle);
3112 init_completion(&ioc->tm_cmds.done);
3113 ioc->put_smid_hi_priority(ioc, smid, msix_task);
3114 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
3115 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
3116 mpt3sas_check_cmd_timeout(ioc,
3117 ioc->tm_cmds.status, mpi_request,
3118 sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
3120 rc = mpt3sas_base_hard_reset_handler(ioc,
3122 rc = (!rc) ? SUCCESS : FAILED;
3127 /* sync IRQs in case those were busy during flush. */
3128 mpt3sas_base_sync_reply_irqs(ioc, 0);
3130 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
3131 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3132 mpi_reply = ioc->tm_cmds.reply;
3134 ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
3135 le16_to_cpu(mpi_reply->IOCStatus),
3136 le32_to_cpu(mpi_reply->IOCLogInfo),
3137 le32_to_cpu(mpi_reply->TerminationCount)));
3138 if (ioc->logging_level & MPT_DEBUG_TM) {
3139 _scsih_response_code(ioc, mpi_reply->ResponseCode);
3140 if (mpi_reply->IOCStatus)
3141 _debug_dump_mf(mpi_request,
3142 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
3147 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3150 * If DevHandle filed in smid_task's entry of request pool
3151 * doesn't match with device handle on which this task abort
3152 * TM is received then it means that TM has successfully
3153 * aborted the timed out command. Since smid_task's entry in
3154 * request pool will be memset to zero once the timed out
3155 * command is returned to the SML. If the command is not
3156 * aborted then smid_task’s entry won’t be cleared and it
3157 * will have same DevHandle value on which this task abort TM
3158 * is received and driver will return the TM status as FAILED.
3160 request = mpt3sas_base_get_msg_frame(ioc, smid_task);
3161 if (le16_to_cpu(request->DevHandle) != handle)
3164 ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
3165 "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
3166 handle, timeout, tr_method, smid_task, msix_task);
3170 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3171 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3172 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3173 rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
3176 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
3185 mpt3sas_scsih_clear_tm_flag(ioc, handle);
3186 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
3190 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3191 uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3192 u16 msix_task, u8 timeout, u8 tr_method)
3196 mutex_lock(&ioc->tm_cmds.mutex);
3197 ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3198 smid_task, msix_task, timeout, tr_method);
3199 mutex_unlock(&ioc->tm_cmds.mutex);
3205 * _scsih_tm_display_info - displays info about the device
3206 * @ioc: per adapter struct
3207 * @scmd: pointer to scsi command object
3209 * Called by task management callback handlers.
3212 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
3214 struct scsi_target *starget = scmd->device->sdev_target;
3215 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
3216 struct _sas_device *sas_device = NULL;
3217 struct _pcie_device *pcie_device = NULL;
3218 unsigned long flags;
3219 char *device_str = NULL;
3223 if (ioc->hide_ir_msg)
3224 device_str = "WarpDrive";
3226 device_str = "volume";
3228 scsi_print_command(scmd);
3229 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3230 starget_printk(KERN_INFO, starget,
3231 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
3232 device_str, priv_target->handle,
3233 device_str, (unsigned long long)priv_target->sas_address);
3235 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
3236 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3237 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
3239 starget_printk(KERN_INFO, starget,
3240 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
3241 pcie_device->handle,
3242 (unsigned long long)pcie_device->wwid,
3243 pcie_device->port_num);
3244 if (pcie_device->enclosure_handle != 0)
3245 starget_printk(KERN_INFO, starget,
3246 "enclosure logical id(0x%016llx), slot(%d)\n",
3247 (unsigned long long)
3248 pcie_device->enclosure_logical_id,
3250 if (pcie_device->connector_name[0] != '\0')
3251 starget_printk(KERN_INFO, starget,
3252 "enclosure level(0x%04x), connector name( %s)\n",
3253 pcie_device->enclosure_level,
3254 pcie_device->connector_name);
3255 pcie_device_put(pcie_device);
3257 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3260 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3261 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
3263 if (priv_target->flags &
3264 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3265 starget_printk(KERN_INFO, starget,
3266 "volume handle(0x%04x), "
3267 "volume wwid(0x%016llx)\n",
3268 sas_device->volume_handle,
3269 (unsigned long long)sas_device->volume_wwid);
3271 starget_printk(KERN_INFO, starget,
3272 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
3274 (unsigned long long)sas_device->sas_address,
3277 _scsih_display_enclosure_chassis_info(NULL, sas_device,
3280 sas_device_put(sas_device);
3282 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3287 * scsih_abort - eh threads main abort routine
3288 * @scmd: pointer to scsi command object
3290 * Return: SUCCESS if command aborted else FAILED
3293 scsih_abort(struct scsi_cmnd *scmd)
3295 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3296 struct MPT3SAS_DEVICE *sas_device_priv_data;
3297 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
3302 struct _pcie_device *pcie_device = NULL;
3303 sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
3304 "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
3305 scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
3306 (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
3307 _scsih_tm_display_info(ioc, scmd);
3309 sas_device_priv_data = scmd->device->hostdata;
3310 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3312 sdev_printk(KERN_INFO, scmd->device,
3313 "device been deleted! scmd(0x%p)\n", scmd);
3314 scmd->result = DID_NO_CONNECT << 16;
3320 /* check for completed command */
3321 if (st == NULL || st->cb_idx == 0xFF) {
3322 sdev_printk(KERN_INFO, scmd->device, "No reference found at "
3323 "driver, assuming scmd(0x%p) might have completed\n", scmd);
3324 scmd->result = DID_RESET << 16;
3329 /* for hidden raid components and volumes this is not supported */
3330 if (sas_device_priv_data->sas_target->flags &
3331 MPT_TARGET_FLAGS_RAID_COMPONENT ||
3332 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3333 scmd->result = DID_RESET << 16;
3338 mpt3sas_halt_firmware(ioc);
3340 handle = sas_device_priv_data->sas_target->handle;
3341 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3342 if (pcie_device && (!ioc->tm_custom_handling) &&
3343 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
3344 timeout = ioc->nvme_abort_timeout;
3345 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3346 scmd->device->id, scmd->device->lun,
3347 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3348 st->smid, st->msix_io, timeout, 0);
3349 /* Command must be cleared after abort */
3350 if (r == SUCCESS && st->cb_idx != 0xFF)
3353 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
3354 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3356 pcie_device_put(pcie_device);
3361 * scsih_dev_reset - eh threads main device reset routine
3362 * @scmd: pointer to scsi command object
3364 * Return: SUCCESS if command aborted else FAILED
3367 scsih_dev_reset(struct scsi_cmnd *scmd)
3369 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3370 struct MPT3SAS_DEVICE *sas_device_priv_data;
3371 struct _sas_device *sas_device = NULL;
3372 struct _pcie_device *pcie_device = NULL;
3378 struct scsi_target *starget = scmd->device->sdev_target;
3379 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3381 sdev_printk(KERN_INFO, scmd->device,
3382 "attempting device reset! scmd(0x%p)\n", scmd);
3383 _scsih_tm_display_info(ioc, scmd);
3385 sas_device_priv_data = scmd->device->hostdata;
3386 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3388 sdev_printk(KERN_INFO, scmd->device,
3389 "device been deleted! scmd(0x%p)\n", scmd);
3390 scmd->result = DID_NO_CONNECT << 16;
3396 /* for hidden raid components obtain the volume_handle */
3398 if (sas_device_priv_data->sas_target->flags &
3399 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3400 sas_device = mpt3sas_get_sdev_from_target(ioc,
3403 handle = sas_device->volume_handle;
3405 handle = sas_device_priv_data->sas_target->handle;
3408 scmd->result = DID_RESET << 16;
3413 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3415 if (pcie_device && (!ioc->tm_custom_handling) &&
3416 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3417 tr_timeout = pcie_device->reset_timeout;
3418 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3420 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3422 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3423 scmd->device->id, scmd->device->lun,
3424 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3425 tr_timeout, tr_method);
3426 /* Check for busy commands after reset */
3427 if (r == SUCCESS && scsi_device_busy(scmd->device))
3430 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
3431 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3434 sas_device_put(sas_device);
3436 pcie_device_put(pcie_device);
3442 * scsih_target_reset - eh threads main target reset routine
3443 * @scmd: pointer to scsi command object
3445 * Return: SUCCESS if command aborted else FAILED
3448 scsih_target_reset(struct scsi_cmnd *scmd)
3450 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3451 struct MPT3SAS_DEVICE *sas_device_priv_data;
3452 struct _sas_device *sas_device = NULL;
3453 struct _pcie_device *pcie_device = NULL;
3458 struct scsi_target *starget = scmd->device->sdev_target;
3459 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3461 starget_printk(KERN_INFO, starget,
3462 "attempting target reset! scmd(0x%p)\n", scmd);
3463 _scsih_tm_display_info(ioc, scmd);
3465 sas_device_priv_data = scmd->device->hostdata;
3466 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3468 starget_printk(KERN_INFO, starget,
3469 "target been deleted! scmd(0x%p)\n", scmd);
3470 scmd->result = DID_NO_CONNECT << 16;
3476 /* for hidden raid components obtain the volume_handle */
3478 if (sas_device_priv_data->sas_target->flags &
3479 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3480 sas_device = mpt3sas_get_sdev_from_target(ioc,
3483 handle = sas_device->volume_handle;
3485 handle = sas_device_priv_data->sas_target->handle;
3488 scmd->result = DID_RESET << 16;
3493 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3495 if (pcie_device && (!ioc->tm_custom_handling) &&
3496 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3497 tr_timeout = pcie_device->reset_timeout;
3498 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3500 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3501 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3502 scmd->device->id, 0,
3503 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3504 tr_timeout, tr_method);
3505 /* Check for busy commands after reset */
3506 if (r == SUCCESS && atomic_read(&starget->target_busy))
3509 starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
3510 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3513 sas_device_put(sas_device);
3515 pcie_device_put(pcie_device);
3521 * scsih_host_reset - eh threads main host reset routine
3522 * @scmd: pointer to scsi command object
3524 * Return: SUCCESS if command aborted else FAILED
3527 scsih_host_reset(struct scsi_cmnd *scmd)
3529 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3532 ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3533 scsi_print_command(scmd);
3535 if (ioc->is_driver_loading || ioc->remove_host) {
3536 ioc_info(ioc, "Blocking the host reset\n");
3541 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3542 r = (retval < 0) ? FAILED : SUCCESS;
3544 ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3545 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3551 * _scsih_fw_event_add - insert and queue up fw_event
3552 * @ioc: per adapter object
3553 * @fw_event: object describing the event
3554 * Context: This function will acquire ioc->fw_event_lock.
3556 * This adds the firmware event object into link list, then queues it up to
3557 * be processed from user context.
3560 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3562 unsigned long flags;
3564 if (ioc->firmware_event_thread == NULL)
3567 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3568 fw_event_work_get(fw_event);
3569 INIT_LIST_HEAD(&fw_event->list);
3570 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3571 INIT_WORK(&fw_event->work, _firmware_event_work);
3572 fw_event_work_get(fw_event);
3573 queue_work(ioc->firmware_event_thread, &fw_event->work);
3574 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3578 * _scsih_fw_event_del_from_list - delete fw_event from the list
3579 * @ioc: per adapter object
3580 * @fw_event: object describing the event
3581 * Context: This function will acquire ioc->fw_event_lock.
3583 * If the fw_event is on the fw_event_list, remove it and do a put.
3586 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3589 unsigned long flags;
3591 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3592 if (!list_empty(&fw_event->list)) {
3593 list_del_init(&fw_event->list);
3594 fw_event_work_put(fw_event);
3596 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3601 * mpt3sas_send_trigger_data_event - send event for processing trigger data
3602 * @ioc: per adapter object
3603 * @event_data: trigger event data
3606 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3607 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3609 struct fw_event_work *fw_event;
3612 if (ioc->is_driver_loading)
3614 sz = sizeof(*event_data);
3615 fw_event = alloc_fw_event_work(sz);
3618 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3619 fw_event->ioc = ioc;
3620 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3621 _scsih_fw_event_add(ioc, fw_event);
3622 fw_event_work_put(fw_event);
3626 * _scsih_error_recovery_delete_devices - remove devices not responding
3627 * @ioc: per adapter object
3630 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3632 struct fw_event_work *fw_event;
3634 fw_event = alloc_fw_event_work(0);
3637 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3638 fw_event->ioc = ioc;
3639 _scsih_fw_event_add(ioc, fw_event);
3640 fw_event_work_put(fw_event);
3644 * mpt3sas_port_enable_complete - port enable completed (fake event)
3645 * @ioc: per adapter object
3648 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3650 struct fw_event_work *fw_event;
3652 fw_event = alloc_fw_event_work(0);
3655 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3656 fw_event->ioc = ioc;
3657 _scsih_fw_event_add(ioc, fw_event);
3658 fw_event_work_put(fw_event);
3661 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3663 unsigned long flags;
3664 struct fw_event_work *fw_event = NULL;
3666 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3667 if (!list_empty(&ioc->fw_event_list)) {
3668 fw_event = list_first_entry(&ioc->fw_event_list,
3669 struct fw_event_work, list);
3670 list_del_init(&fw_event->list);
3671 fw_event_work_put(fw_event);
3673 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3679 * _scsih_fw_event_cleanup_queue - cleanup event queue
3680 * @ioc: per adapter object
3682 * Walk the firmware event queue, either killing timers, or waiting
3683 * for outstanding events to complete
3685 * Context: task, can sleep
3688 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3690 struct fw_event_work *fw_event;
3692 if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3693 !ioc->firmware_event_thread)
3696 * Set current running event as ignore, so that
3697 * current running event will exit quickly.
3698 * As diag reset has occurred it is of no use
3699 * to process remaining stale event data entries.
3701 if (ioc->shost_recovery && ioc->current_event)
3702 ioc->current_event->ignore = 1;
3704 ioc->fw_events_cleanup = 1;
3705 while ((fw_event = dequeue_next_fw_event(ioc)) ||
3706 (fw_event = ioc->current_event)) {
3709 * Don't call cancel_work_sync() for current_event
3710 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3711 * otherwise we may observe deadlock if current
3712 * hard reset issued as part of processing the current_event.
3714 * Orginal logic of cleaning the current_event is added
3715 * for handling the back to back host reset issued by the user.
3716 * i.e. during back to back host reset, driver use to process
3717 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES
3718 * event back to back and this made the drives to unregister
3719 * the devices from SML.
3722 if (fw_event == ioc->current_event &&
3723 ioc->current_event->event !=
3724 MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
3725 ioc->current_event = NULL;
3730 * Driver has to clear ioc->start_scan flag when
3731 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
3732 * otherwise scsi_scan_host() API waits for the
3733 * 5 minute timer to expire. If we exit from
3734 * scsi_scan_host() early then we can issue the
3735 * new port enable request as part of current diag reset.
3737 if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
3738 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
3739 ioc->start_scan = 0;
3743 * Wait on the fw_event to complete. If this returns 1, then
3744 * the event was never executed, and we need a put for the
3745 * reference the work had on the fw_event.
3747 * If it did execute, we wait for it to finish, and the put will
3748 * happen from _firmware_event_work()
3750 if (cancel_work_sync(&fw_event->work))
3751 fw_event_work_put(fw_event);
3754 ioc->fw_events_cleanup = 0;
3758 * _scsih_internal_device_block - block the sdev device
3759 * @sdev: per device object
3760 * @sas_device_priv_data : per device driver private data
3762 * make sure device is blocked without error, if not
3766 _scsih_internal_device_block(struct scsi_device *sdev,
3767 struct MPT3SAS_DEVICE *sas_device_priv_data)
3771 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3772 sas_device_priv_data->sas_target->handle);
3773 sas_device_priv_data->block = 1;
3775 r = scsi_internal_device_block_nowait(sdev);
3777 sdev_printk(KERN_WARNING, sdev,
3778 "device_block failed with return(%d) for handle(0x%04x)\n",
3779 r, sas_device_priv_data->sas_target->handle);
3783 * _scsih_internal_device_unblock - unblock the sdev device
3784 * @sdev: per device object
3785 * @sas_device_priv_data : per device driver private data
3786 * make sure device is unblocked without error, if not retry
3787 * by blocking and then unblocking
3791 _scsih_internal_device_unblock(struct scsi_device *sdev,
3792 struct MPT3SAS_DEVICE *sas_device_priv_data)
3796 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3797 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3798 sas_device_priv_data->block = 0;
3799 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3801 /* The device has been set to SDEV_RUNNING by SD layer during
3802 * device addition but the request queue is still stopped by
3803 * our earlier block call. We need to perform a block again
3804 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3806 sdev_printk(KERN_WARNING, sdev,
3807 "device_unblock failed with return(%d) for handle(0x%04x) "
3808 "performing a block followed by an unblock\n",
3809 r, sas_device_priv_data->sas_target->handle);
3810 sas_device_priv_data->block = 1;
3811 r = scsi_internal_device_block_nowait(sdev);
3813 sdev_printk(KERN_WARNING, sdev, "retried device_block "
3814 "failed with return(%d) for handle(0x%04x)\n",
3815 r, sas_device_priv_data->sas_target->handle);
3817 sas_device_priv_data->block = 0;
3818 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3820 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3821 " failed with return(%d) for handle(0x%04x)\n",
3822 r, sas_device_priv_data->sas_target->handle);
3827 * _scsih_ublock_io_all_device - unblock every device
3828 * @ioc: per adapter object
3830 * change the device state from block to running
3833 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3835 struct MPT3SAS_DEVICE *sas_device_priv_data;
3836 struct scsi_device *sdev;
3838 shost_for_each_device(sdev, ioc->shost) {
3839 sas_device_priv_data = sdev->hostdata;
3840 if (!sas_device_priv_data)
3842 if (!sas_device_priv_data->block)
3845 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3846 "device_running, handle(0x%04x)\n",
3847 sas_device_priv_data->sas_target->handle));
3848 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3854 * _scsih_ublock_io_device - prepare device to be deleted
3855 * @ioc: per adapter object
3856 * @sas_address: sas address
3857 * @port: hba port entry
3859 * unblock then put device in offline state
3862 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3863 u64 sas_address, struct hba_port *port)
3865 struct MPT3SAS_DEVICE *sas_device_priv_data;
3866 struct scsi_device *sdev;
3868 shost_for_each_device(sdev, ioc->shost) {
3869 sas_device_priv_data = sdev->hostdata;
3870 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
3872 if (sas_device_priv_data->sas_target->sas_address
3875 if (sas_device_priv_data->sas_target->port != port)
3877 if (sas_device_priv_data->block)
3878 _scsih_internal_device_unblock(sdev,
3879 sas_device_priv_data);
3884 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3885 * @ioc: per adapter object
3887 * During device pull we need to appropriately set the sdev state.
3890 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3892 struct MPT3SAS_DEVICE *sas_device_priv_data;
3893 struct scsi_device *sdev;
3895 shost_for_each_device(sdev, ioc->shost) {
3896 sas_device_priv_data = sdev->hostdata;
3897 if (!sas_device_priv_data)
3899 if (sas_device_priv_data->block)
3901 if (sas_device_priv_data->ignore_delay_remove) {
3902 sdev_printk(KERN_INFO, sdev,
3903 "%s skip device_block for SES handle(0x%04x)\n",
3904 __func__, sas_device_priv_data->sas_target->handle);
3907 _scsih_internal_device_block(sdev, sas_device_priv_data);
3912 * _scsih_block_io_device - set the device state to SDEV_BLOCK
3913 * @ioc: per adapter object
3914 * @handle: device handle
3916 * During device pull we need to appropriately set the sdev state.
3919 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3921 struct MPT3SAS_DEVICE *sas_device_priv_data;
3922 struct scsi_device *sdev;
3923 struct _sas_device *sas_device;
3925 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3927 shost_for_each_device(sdev, ioc->shost) {
3928 sas_device_priv_data = sdev->hostdata;
3929 if (!sas_device_priv_data)
3931 if (sas_device_priv_data->sas_target->handle != handle)
3933 if (sas_device_priv_data->block)
3935 if (sas_device && sas_device->pend_sas_rphy_add)
3937 if (sas_device_priv_data->ignore_delay_remove) {
3938 sdev_printk(KERN_INFO, sdev,
3939 "%s skip device_block for SES handle(0x%04x)\n",
3940 __func__, sas_device_priv_data->sas_target->handle);
3943 _scsih_internal_device_block(sdev, sas_device_priv_data);
3947 sas_device_put(sas_device);
3951 * _scsih_block_io_to_children_attached_to_ex
3952 * @ioc: per adapter object
3953 * @sas_expander: the sas_device object
3955 * This routine set sdev state to SDEV_BLOCK for all devices
3956 * attached to this expander. This function called when expander is
3960 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3961 struct _sas_node *sas_expander)
3963 struct _sas_port *mpt3sas_port;
3964 struct _sas_device *sas_device;
3965 struct _sas_node *expander_sibling;
3966 unsigned long flags;
3971 list_for_each_entry(mpt3sas_port,
3972 &sas_expander->sas_port_list, port_list) {
3973 if (mpt3sas_port->remote_identify.device_type ==
3975 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3976 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3977 mpt3sas_port->remote_identify.sas_address,
3978 mpt3sas_port->hba_port);
3980 set_bit(sas_device->handle,
3981 ioc->blocking_handles);
3982 sas_device_put(sas_device);
3984 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3988 list_for_each_entry(mpt3sas_port,
3989 &sas_expander->sas_port_list, port_list) {
3991 if (mpt3sas_port->remote_identify.device_type ==
3992 SAS_EDGE_EXPANDER_DEVICE ||
3993 mpt3sas_port->remote_identify.device_type ==
3994 SAS_FANOUT_EXPANDER_DEVICE) {
3996 mpt3sas_scsih_expander_find_by_sas_address(
3997 ioc, mpt3sas_port->remote_identify.sas_address,
3998 mpt3sas_port->hba_port);
3999 _scsih_block_io_to_children_attached_to_ex(ioc,
4006 * _scsih_block_io_to_children_attached_directly
4007 * @ioc: per adapter object
4008 * @event_data: topology change event data
4010 * This routine set sdev state to SDEV_BLOCK for all devices
4011 * direct attached during device pull.
4014 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4015 Mpi2EventDataSasTopologyChangeList_t *event_data)
4021 for (i = 0; i < event_data->NumEntries; i++) {
4022 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4025 reason_code = event_data->PHY[i].PhyStatus &
4026 MPI2_EVENT_SAS_TOPO_RC_MASK;
4027 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
4028 _scsih_block_io_device(ioc, handle);
4033 * _scsih_block_io_to_pcie_children_attached_directly
4034 * @ioc: per adapter object
4035 * @event_data: topology change event data
4037 * This routine set sdev state to SDEV_BLOCK for all devices
4038 * direct attached during device pull/reconnect.
4041 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4042 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4048 for (i = 0; i < event_data->NumEntries; i++) {
4050 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4053 reason_code = event_data->PortEntry[i].PortStatus;
4055 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4056 _scsih_block_io_device(ioc, handle);
4060 * _scsih_tm_tr_send - send task management request
4061 * @ioc: per adapter object
4062 * @handle: device handle
4063 * Context: interrupt time.
4065 * This code is to initiate the device removal handshake protocol
4066 * with controller firmware. This function will issue target reset
4067 * using high priority request queue. It will send a sas iounit
4068 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
4070 * This is designed to send muliple task management request at the same
4071 * time to the fifo. If the fifo is full, we will append the request,
4072 * and process it in a future completion.
4075 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4077 Mpi2SCSITaskManagementRequest_t *mpi_request;
4079 struct _sas_device *sas_device = NULL;
4080 struct _pcie_device *pcie_device = NULL;
4081 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4082 u64 sas_address = 0;
4083 unsigned long flags;
4084 struct _tr_list *delayed_tr;
4087 struct hba_port *port = NULL;
4089 if (ioc->pci_error_recovery) {
4091 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4095 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4096 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4098 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4103 /* if PD, then return */
4104 if (test_bit(handle, ioc->pd_handles))
4107 clear_bit(handle, ioc->pend_os_device_add);
4109 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4110 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4111 if (sas_device && sas_device->starget &&
4112 sas_device->starget->hostdata) {
4113 sas_target_priv_data = sas_device->starget->hostdata;
4114 sas_target_priv_data->deleted = 1;
4115 sas_address = sas_device->sas_address;
4116 port = sas_device->port;
4118 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4120 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4121 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4122 if (pcie_device && pcie_device->starget &&
4123 pcie_device->starget->hostdata) {
4124 sas_target_priv_data = pcie_device->starget->hostdata;
4125 sas_target_priv_data->deleted = 1;
4126 sas_address = pcie_device->wwid;
4128 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
4129 if (pcie_device && (!ioc->tm_custom_handling) &&
4130 (!(mpt3sas_scsih_is_pcie_scsi_device(
4131 pcie_device->device_info))))
4133 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4135 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4137 if (sas_target_priv_data) {
4139 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4140 handle, (u64)sas_address));
4142 if (sas_device->enclosure_handle != 0)
4144 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4145 (u64)sas_device->enclosure_logical_id,
4147 if (sas_device->connector_name[0] != '\0')
4149 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4150 sas_device->enclosure_level,
4151 sas_device->connector_name));
4152 } else if (pcie_device) {
4153 if (pcie_device->enclosure_handle != 0)
4155 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4156 (u64)pcie_device->enclosure_logical_id,
4157 pcie_device->slot));
4158 if (pcie_device->connector_name[0] != '\0')
4160 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
4161 pcie_device->enclosure_level,
4162 pcie_device->connector_name));
4164 _scsih_ublock_io_device(ioc, sas_address, port);
4165 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
4168 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4170 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4173 INIT_LIST_HEAD(&delayed_tr->list);
4174 delayed_tr->handle = handle;
4175 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4177 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4183 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4184 handle, smid, ioc->tm_tr_cb_idx));
4185 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4186 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4187 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4188 mpi_request->DevHandle = cpu_to_le16(handle);
4189 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4190 mpi_request->MsgFlags = tr_method;
4191 set_bit(handle, ioc->device_remove_in_progress);
4192 ioc->put_smid_hi_priority(ioc, smid, 0);
4193 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
4197 sas_device_put(sas_device);
4199 pcie_device_put(pcie_device);
4203 * _scsih_tm_tr_complete -
4204 * @ioc: per adapter object
4205 * @smid: system request message index
4206 * @msix_index: MSIX table index supplied by the OS
4207 * @reply: reply message frame(lower 32bit addr)
4208 * Context: interrupt time.
4210 * This is the target reset completion routine.
4211 * This code is part of the code to initiate the device removal
4212 * handshake protocol with controller firmware.
4213 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
4215 * Return: 1 meaning mf should be freed from _base_interrupt
4216 * 0 means the mf is freed from this function.
4219 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4223 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4224 Mpi2SCSITaskManagementReply_t *mpi_reply =
4225 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4226 Mpi2SasIoUnitControlRequest_t *mpi_request;
4229 struct _sc_list *delayed_sc;
4231 if (ioc->pci_error_recovery) {
4233 ioc_info(ioc, "%s: host in pci error recovery\n",
4237 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4238 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4240 ioc_info(ioc, "%s: host is not operational\n",
4244 if (unlikely(!mpi_reply)) {
4245 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4246 __FILE__, __LINE__, __func__);
4249 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4250 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4251 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4253 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4255 le16_to_cpu(mpi_reply->DevHandle), smid));
4259 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
4261 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4262 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4263 le32_to_cpu(mpi_reply->IOCLogInfo),
4264 le32_to_cpu(mpi_reply->TerminationCount)));
4266 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4267 if (!smid_sas_ctrl) {
4268 delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4270 return _scsih_check_for_pending_tm(ioc, smid);
4271 INIT_LIST_HEAD(&delayed_sc->list);
4272 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
4273 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
4275 ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4277 return _scsih_check_for_pending_tm(ioc, smid);
4281 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4282 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
4283 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4284 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4285 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4286 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4287 mpi_request->DevHandle = mpi_request_tm->DevHandle;
4288 ioc->put_smid_default(ioc, smid_sas_ctrl);
4290 return _scsih_check_for_pending_tm(ioc, smid);
4293 /** _scsih_allow_scmd_to_device - check whether scmd needs to
4294 * issue to IOC or not.
4295 * @ioc: per adapter object
4296 * @scmd: pointer to scsi command object
4298 * Returns true if scmd can be issued to IOC otherwise returns false.
4300 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4301 struct scsi_cmnd *scmd)
4304 if (ioc->pci_error_recovery)
4307 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4308 if (ioc->remove_host)
4314 if (ioc->remove_host) {
4316 switch (scmd->cmnd[0]) {
4317 case SYNCHRONIZE_CACHE:
4329 * _scsih_sas_control_complete - completion routine
4330 * @ioc: per adapter object
4331 * @smid: system request message index
4332 * @msix_index: MSIX table index supplied by the OS
4333 * @reply: reply message frame(lower 32bit addr)
4334 * Context: interrupt time.
4336 * This is the sas iounit control completion routine.
4337 * This code is part of the code to initiate the device removal
4338 * handshake protocol with controller firmware.
4340 * Return: 1 meaning mf should be freed from _base_interrupt
4341 * 0 means the mf is freed from this function.
4344 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4345 u8 msix_index, u32 reply)
4347 Mpi2SasIoUnitControlReply_t *mpi_reply =
4348 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4350 if (likely(mpi_reply)) {
4352 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4353 le16_to_cpu(mpi_reply->DevHandle), smid,
4354 le16_to_cpu(mpi_reply->IOCStatus),
4355 le32_to_cpu(mpi_reply->IOCLogInfo)));
4356 if (le16_to_cpu(mpi_reply->IOCStatus) ==
4357 MPI2_IOCSTATUS_SUCCESS) {
4358 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4359 ioc->device_remove_in_progress);
4362 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4363 __FILE__, __LINE__, __func__);
4365 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4369 * _scsih_tm_tr_volume_send - send target reset request for volumes
4370 * @ioc: per adapter object
4371 * @handle: device handle
4372 * Context: interrupt time.
4374 * This is designed to send muliple task management request at the same
4375 * time to the fifo. If the fifo is full, we will append the request,
4376 * and process it in a future completion.
4379 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4381 Mpi2SCSITaskManagementRequest_t *mpi_request;
4383 struct _tr_list *delayed_tr;
4385 if (ioc->pci_error_recovery) {
4387 ioc_info(ioc, "%s: host reset in progress!\n",
4392 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4394 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4397 INIT_LIST_HEAD(&delayed_tr->list);
4398 delayed_tr->handle = handle;
4399 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4401 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4407 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4408 handle, smid, ioc->tm_tr_volume_cb_idx));
4409 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4410 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4411 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4412 mpi_request->DevHandle = cpu_to_le16(handle);
4413 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4414 ioc->put_smid_hi_priority(ioc, smid, 0);
4418 * _scsih_tm_volume_tr_complete - target reset completion
4419 * @ioc: per adapter object
4420 * @smid: system request message index
4421 * @msix_index: MSIX table index supplied by the OS
4422 * @reply: reply message frame(lower 32bit addr)
4423 * Context: interrupt time.
4425 * Return: 1 meaning mf should be freed from _base_interrupt
4426 * 0 means the mf is freed from this function.
4429 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4430 u8 msix_index, u32 reply)
4433 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4434 Mpi2SCSITaskManagementReply_t *mpi_reply =
4435 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4437 if (ioc->shost_recovery || ioc->pci_error_recovery) {
4439 ioc_info(ioc, "%s: host reset in progress!\n",
4443 if (unlikely(!mpi_reply)) {
4444 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4445 __FILE__, __LINE__, __func__);
4449 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4450 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4451 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4453 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4454 handle, le16_to_cpu(mpi_reply->DevHandle),
4460 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4461 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4462 le32_to_cpu(mpi_reply->IOCLogInfo),
4463 le32_to_cpu(mpi_reply->TerminationCount)));
4465 return _scsih_check_for_pending_tm(ioc, smid);
4469 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4470 * @ioc: per adapter object
4471 * @smid: system request message index
4473 * @event_context: used to track events uniquely
4475 * Context - processed in interrupt context.
4478 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4481 Mpi2EventAckRequest_t *ack_request;
4482 int i = smid - ioc->internal_smid;
4483 unsigned long flags;
4485 /* Without releasing the smid just update the
4486 * call back index and reuse the same smid for
4487 * processing this delayed request
4489 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4490 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4491 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4494 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4495 le16_to_cpu(event), smid, ioc->base_cb_idx));
4496 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4497 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4498 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4499 ack_request->Event = event;
4500 ack_request->EventContext = event_context;
4501 ack_request->VF_ID = 0; /* TODO */
4502 ack_request->VP_ID = 0;
4503 ioc->put_smid_default(ioc, smid);
4507 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4508 * sas_io_unit_ctrl messages
4509 * @ioc: per adapter object
4510 * @smid: system request message index
4511 * @handle: device handle
4513 * Context - processed in interrupt context.
4516 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4517 u16 smid, u16 handle)
4519 Mpi2SasIoUnitControlRequest_t *mpi_request;
4521 int i = smid - ioc->internal_smid;
4522 unsigned long flags;
4524 if (ioc->remove_host) {
4526 ioc_info(ioc, "%s: host has been removed\n",
4529 } else if (ioc->pci_error_recovery) {
4531 ioc_info(ioc, "%s: host in pci error recovery\n",
4535 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4536 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4538 ioc_info(ioc, "%s: host is not operational\n",
4543 /* Without releasing the smid just update the
4544 * call back index and reuse the same smid for
4545 * processing this delayed request
4547 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4548 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4549 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4552 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4553 handle, smid, ioc->tm_sas_control_cb_idx));
4554 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4555 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4556 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4557 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4558 mpi_request->DevHandle = cpu_to_le16(handle);
4559 ioc->put_smid_default(ioc, smid);
4563 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4564 * @ioc: per adapter object
4565 * @smid: system request message index
4567 * Context: Executed in interrupt context
4569 * This will check delayed internal messages list, and process the
4572 * Return: 1 meaning mf should be freed from _base_interrupt
4573 * 0 means the mf is freed from this function.
4576 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4578 struct _sc_list *delayed_sc;
4579 struct _event_ack_list *delayed_event_ack;
4581 if (!list_empty(&ioc->delayed_event_ack_list)) {
4582 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4583 struct _event_ack_list, list);
4584 _scsih_issue_delayed_event_ack(ioc, smid,
4585 delayed_event_ack->Event, delayed_event_ack->EventContext);
4586 list_del(&delayed_event_ack->list);
4587 kfree(delayed_event_ack);
4591 if (!list_empty(&ioc->delayed_sc_list)) {
4592 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4593 struct _sc_list, list);
4594 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4595 delayed_sc->handle);
4596 list_del(&delayed_sc->list);
4604 * _scsih_check_for_pending_tm - check for pending task management
4605 * @ioc: per adapter object
4606 * @smid: system request message index
4608 * This will check delayed target reset list, and feed the
4611 * Return: 1 meaning mf should be freed from _base_interrupt
4612 * 0 means the mf is freed from this function.
4615 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4617 struct _tr_list *delayed_tr;
4619 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4620 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4621 struct _tr_list, list);
4622 mpt3sas_base_free_smid(ioc, smid);
4623 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4624 list_del(&delayed_tr->list);
4629 if (!list_empty(&ioc->delayed_tr_list)) {
4630 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4631 struct _tr_list, list);
4632 mpt3sas_base_free_smid(ioc, smid);
4633 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4634 list_del(&delayed_tr->list);
4643 * _scsih_check_topo_delete_events - sanity check on topo events
4644 * @ioc: per adapter object
4645 * @event_data: the event data payload
4647 * This routine added to better handle cable breaker.
4649 * This handles the case where driver receives multiple expander
4650 * add and delete events in a single shot. When there is a delete event
4651 * the routine will void any pending add events waiting in the event queue.
4654 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4655 Mpi2EventDataSasTopologyChangeList_t *event_data)
4657 struct fw_event_work *fw_event;
4658 Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4659 u16 expander_handle;
4660 struct _sas_node *sas_expander;
4661 unsigned long flags;
4665 for (i = 0 ; i < event_data->NumEntries; i++) {
4666 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4669 reason_code = event_data->PHY[i].PhyStatus &
4670 MPI2_EVENT_SAS_TOPO_RC_MASK;
4671 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4672 _scsih_tm_tr_send(ioc, handle);
4675 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4676 if (expander_handle < ioc->sas_hba.num_phys) {
4677 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4680 if (event_data->ExpStatus ==
4681 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4682 /* put expander attached devices into blocking state */
4683 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4684 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4686 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4687 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4689 handle = find_first_bit(ioc->blocking_handles,
4690 ioc->facts.MaxDevHandle);
4691 if (handle < ioc->facts.MaxDevHandle)
4692 _scsih_block_io_device(ioc, handle);
4693 } while (test_and_clear_bit(handle, ioc->blocking_handles));
4694 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4695 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4697 if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4700 /* mark ignore flag for pending events */
4701 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4702 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4703 if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4706 local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4707 fw_event->event_data;
4708 if (local_event_data->ExpStatus ==
4709 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4710 local_event_data->ExpStatus ==
4711 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4712 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4715 ioc_info(ioc, "setting ignoring flag\n"));
4716 fw_event->ignore = 1;
4720 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4724 * _scsih_check_pcie_topo_remove_events - sanity check on topo
4726 * @ioc: per adapter object
4727 * @event_data: the event data payload
4729 * This handles the case where driver receives multiple switch
4730 * or device add and delete events in a single shot. When there
4731 * is a delete event the routine will void any pending add
4732 * events waiting in the event queue.
4735 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4736 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4738 struct fw_event_work *fw_event;
4739 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4740 unsigned long flags;
4742 u16 handle, switch_handle;
4744 for (i = 0; i < event_data->NumEntries; i++) {
4746 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4749 reason_code = event_data->PortEntry[i].PortStatus;
4750 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4751 _scsih_tm_tr_send(ioc, handle);
4754 switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4755 if (!switch_handle) {
4756 _scsih_block_io_to_pcie_children_attached_directly(
4760 /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4761 if ((event_data->SwitchStatus
4762 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4763 (event_data->SwitchStatus ==
4764 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4765 _scsih_block_io_to_pcie_children_attached_directly(
4768 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4771 /* mark ignore flag for pending events */
4772 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4773 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4774 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4778 (Mpi26EventDataPCIeTopologyChangeList_t *)
4779 fw_event->event_data;
4780 if (local_event_data->SwitchStatus ==
4781 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4782 local_event_data->SwitchStatus ==
4783 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4784 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4787 ioc_info(ioc, "setting ignoring flag for switch event\n"));
4788 fw_event->ignore = 1;
4792 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4796 * _scsih_set_volume_delete_flag - setting volume delete flag
4797 * @ioc: per adapter object
4798 * @handle: device handle
4800 * This returns nothing.
4803 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4805 struct _raid_device *raid_device;
4806 struct MPT3SAS_TARGET *sas_target_priv_data;
4807 unsigned long flags;
4809 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4810 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4811 if (raid_device && raid_device->starget &&
4812 raid_device->starget->hostdata) {
4813 sas_target_priv_data =
4814 raid_device->starget->hostdata;
4815 sas_target_priv_data->deleted = 1;
4817 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4818 handle, (u64)raid_device->wwid));
4820 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4824 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4825 * @handle: input handle
4826 * @a: handle for volume a
4827 * @b: handle for volume b
4829 * IR firmware only supports two raid volumes. The purpose of this
4830 * routine is to set the volume handle in either a or b. When the given
4831 * input handle is non-zero, or when a and b have not been set before.
4834 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4836 if (!handle || handle == *a || handle == *b)
4845 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4846 * @ioc: per adapter object
4847 * @event_data: the event data payload
4848 * Context: interrupt time.
4850 * This routine will send target reset to volume, followed by target
4851 * resets to the PDs. This is called when a PD has been removed, or
4852 * volume has been deleted or removed. When the target reset is sent
4853 * to volume, the PD target resets need to be queued to start upon
4854 * completion of the volume target reset.
4857 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4858 Mpi2EventDataIrConfigChangeList_t *event_data)
4860 Mpi2EventIrConfigElement_t *element;
4862 u16 handle, volume_handle, a, b;
4863 struct _tr_list *delayed_tr;
4868 if (ioc->is_warpdrive)
4871 /* Volume Resets for Deleted or Removed */
4872 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4873 for (i = 0; i < event_data->NumElements; i++, element++) {
4874 if (le32_to_cpu(event_data->Flags) &
4875 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4877 if (element->ReasonCode ==
4878 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4879 element->ReasonCode ==
4880 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4881 volume_handle = le16_to_cpu(element->VolDevHandle);
4882 _scsih_set_volume_delete_flag(ioc, volume_handle);
4883 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4887 /* Volume Resets for UNHIDE events */
4888 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4889 for (i = 0; i < event_data->NumElements; i++, element++) {
4890 if (le32_to_cpu(event_data->Flags) &
4891 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4893 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4894 volume_handle = le16_to_cpu(element->VolDevHandle);
4895 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4900 _scsih_tm_tr_volume_send(ioc, a);
4902 _scsih_tm_tr_volume_send(ioc, b);
4904 /* PD target resets */
4905 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4906 for (i = 0; i < event_data->NumElements; i++, element++) {
4907 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4909 handle = le16_to_cpu(element->PhysDiskDevHandle);
4910 volume_handle = le16_to_cpu(element->VolDevHandle);
4911 clear_bit(handle, ioc->pd_handles);
4913 _scsih_tm_tr_send(ioc, handle);
4914 else if (volume_handle == a || volume_handle == b) {
4915 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4916 BUG_ON(!delayed_tr);
4917 INIT_LIST_HEAD(&delayed_tr->list);
4918 delayed_tr->handle = handle;
4919 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4921 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4924 _scsih_tm_tr_send(ioc, handle);
4930 * _scsih_check_volume_delete_events - set delete flag for volumes
4931 * @ioc: per adapter object
4932 * @event_data: the event data payload
4933 * Context: interrupt time.
4935 * This will handle the case when the cable connected to entire volume is
4936 * pulled. We will take care of setting the deleted flag so normal IO will
4940 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4941 Mpi2EventDataIrVolume_t *event_data)
4945 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4947 state = le32_to_cpu(event_data->NewValue);
4948 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4949 MPI2_RAID_VOL_STATE_FAILED)
4950 _scsih_set_volume_delete_flag(ioc,
4951 le16_to_cpu(event_data->VolDevHandle));
4955 * _scsih_temp_threshold_events - display temperature threshold exceeded events
4956 * @ioc: per adapter object
4957 * @event_data: the temp threshold event data
4958 * Context: interrupt time.
4961 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4962 Mpi2EventDataTemperature_t *event_data)
4965 if (ioc->temp_sensors_count >= event_data->SensorNum) {
4966 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4967 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4968 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4969 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4970 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4971 event_data->SensorNum);
4972 ioc_err(ioc, "Current Temp In Celsius: %d\n",
4973 event_data->CurrentTemperature);
4974 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4975 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4976 if ((doorbell & MPI2_IOC_STATE_MASK) ==
4977 MPI2_IOC_STATE_FAULT) {
4978 mpt3sas_print_fault_code(ioc,
4979 doorbell & MPI2_DOORBELL_DATA_MASK);
4980 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4981 MPI2_IOC_STATE_COREDUMP) {
4982 mpt3sas_print_coredump_info(ioc,
4983 doorbell & MPI2_DOORBELL_DATA_MASK);
4989 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4991 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4993 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4997 return test_and_set_bit(0, &priv->ata_command_pending);
4999 clear_bit(0, &priv->ata_command_pending);
5004 * _scsih_flush_running_cmds - completing outstanding commands.
5005 * @ioc: per adapter object
5007 * The flushing out of all pending scmd commands following host reset,
5008 * where all IO is dropped to the floor.
5011 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
5013 struct scsi_cmnd *scmd;
5014 struct scsiio_tracker *st;
5018 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
5019 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5023 _scsih_set_satl_pending(scmd, false);
5024 st = scsi_cmd_priv(scmd);
5025 mpt3sas_base_clear_st(ioc, st);
5026 scsi_dma_unmap(scmd);
5027 if (ioc->pci_error_recovery || ioc->remove_host)
5028 scmd->result = DID_NO_CONNECT << 16;
5030 scmd->result = DID_RESET << 16;
5033 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
5037 * _scsih_setup_eedp - setup MPI request for EEDP transfer
5038 * @ioc: per adapter object
5039 * @scmd: pointer to scsi command object
5040 * @mpi_request: pointer to the SCSI_IO request message frame
5042 * Supporting protection 1 and 3.
5045 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5046 Mpi25SCSIIORequest_t *mpi_request)
5049 Mpi25SCSIIORequest_t *mpi_request_3v =
5050 (Mpi25SCSIIORequest_t *)mpi_request;
5052 switch (scsi_get_prot_op(scmd)) {
5053 case SCSI_PROT_READ_STRIP:
5054 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
5056 case SCSI_PROT_WRITE_INSERT:
5057 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
5063 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
5064 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5066 if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
5067 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
5069 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
5070 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
5072 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
5073 cpu_to_be32(scsi_prot_ref_tag(scmd));
5076 mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));
5078 if (ioc->is_gen35_ioc)
5079 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
5080 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
5084 * _scsih_eedp_error_handling - return sense code for EEDP errors
5085 * @scmd: pointer to scsi command object
5086 * @ioc_status: ioc status
5089 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5093 switch (ioc_status) {
5094 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5097 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5100 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5107 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
5108 set_host_byte(scmd, DID_ABORT);
5112 * scsih_qcmd - main scsi request entry point
5113 * @shost: SCSI host pointer
5114 * @scmd: pointer to scsi command object
5116 * The callback index is set inside `ioc->scsi_io_cb_idx`.
5118 * Return: 0 on success. If there's a failure, return either:
5119 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
5120 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
5123 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5125 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5126 struct MPT3SAS_DEVICE *sas_device_priv_data;
5127 struct MPT3SAS_TARGET *sas_target_priv_data;
5128 struct _raid_device *raid_device;
5129 struct request *rq = scsi_cmd_to_rq(scmd);
5131 Mpi25SCSIIORequest_t *mpi_request;
5132 struct _pcie_device *pcie_device = NULL;
5137 if (ioc->logging_level & MPT_DEBUG_SCSI)
5138 scsi_print_command(scmd);
5140 sas_device_priv_data = scmd->device->hostdata;
5141 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5142 scmd->result = DID_NO_CONNECT << 16;
5147 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5148 scmd->result = DID_NO_CONNECT << 16;
5153 sas_target_priv_data = sas_device_priv_data->sas_target;
5155 /* invalid device handle */
5156 handle = sas_target_priv_data->handle;
5159 * Avoid error handling escallation when device is disconnected
5161 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) {
5162 if (scmd->device->host->shost_state == SHOST_RECOVERY &&
5163 scmd->cmnd[0] == TEST_UNIT_READY) {
5164 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
5170 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5171 scmd->result = DID_NO_CONNECT << 16;
5177 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5178 /* host recovery or link resets sent via IOCTLs */
5179 return SCSI_MLQUEUE_HOST_BUSY;
5180 } else if (sas_target_priv_data->deleted) {
5181 /* device has been deleted */
5182 scmd->result = DID_NO_CONNECT << 16;
5185 } else if (sas_target_priv_data->tm_busy ||
5186 sas_device_priv_data->block) {
5187 /* device busy with task management */
5188 return SCSI_MLQUEUE_DEVICE_BUSY;
5192 * Bug work around for firmware SATL handling. The loop
5193 * is based on atomic operations and ensures consistency
5194 * since we're lockless at this point
5197 if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5198 return SCSI_MLQUEUE_DEVICE_BUSY;
5199 } while (_scsih_set_satl_pending(scmd, true));
5201 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5202 mpi_control = MPI2_SCSIIO_CONTROL_READ;
5203 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5204 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5206 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5209 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5210 /* NCQ Prio supported, make sure control indicated high priority */
5211 if (sas_device_priv_data->ncq_prio_enable) {
5212 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5213 if (class == IOPRIO_CLASS_RT)
5214 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5216 /* Make sure Device is not raid volume.
5217 * We do not expose raid functionality to upper layer for warpdrive.
5219 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5220 && !scsih_is_nvme(&scmd->device->sdev_gendev))
5221 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5222 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5224 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5226 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5227 _scsih_set_satl_pending(scmd, false);
5230 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5231 memset(mpi_request, 0, ioc->request_sz);
5232 _scsih_setup_eedp(ioc, scmd, mpi_request);
5234 if (scmd->cmd_len == 32)
5235 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5236 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5237 if (sas_device_priv_data->sas_target->flags &
5238 MPT_TARGET_FLAGS_RAID_COMPONENT)
5239 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5241 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5242 mpi_request->DevHandle = cpu_to_le16(handle);
5243 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5244 mpi_request->Control = cpu_to_le32(mpi_control);
5245 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5246 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5247 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5248 mpi_request->SenseBufferLowAddress =
5249 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5250 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5251 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5253 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5255 if (mpi_request->DataLength) {
5256 pcie_device = sas_target_priv_data->pcie_dev;
5257 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5258 mpt3sas_base_free_smid(ioc, smid);
5259 _scsih_set_satl_pending(scmd, false);
5263 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5265 raid_device = sas_target_priv_data->raid_device;
5266 if (raid_device && raid_device->direct_io_enabled)
5267 mpt3sas_setup_direct_io(ioc, scmd,
5268 raid_device, mpi_request);
5270 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5271 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5272 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5273 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5274 ioc->put_smid_fast_path(ioc, smid, handle);
5276 ioc->put_smid_scsi_io(ioc, smid,
5277 le16_to_cpu(mpi_request->DevHandle));
5279 ioc->put_smid_default(ioc, smid);
5283 return SCSI_MLQUEUE_HOST_BUSY;
5287 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5288 * @sense_buffer: sense data returned by target
5289 * @data: normalized skey/asc/ascq
5292 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5294 if ((sense_buffer[0] & 0x7F) >= 0x72) {
5295 /* descriptor format */
5296 data->skey = sense_buffer[1] & 0x0F;
5297 data->asc = sense_buffer[2];
5298 data->ascq = sense_buffer[3];
5301 data->skey = sense_buffer[2] & 0x0F;
5302 data->asc = sense_buffer[12];
5303 data->ascq = sense_buffer[13];
5308 * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
5309 * @ioc: per adapter object
5310 * @scmd: pointer to scsi command object
5311 * @mpi_reply: reply mf payload returned from firmware
5314 * scsi_status - SCSI Status code returned from target device
5315 * scsi_state - state info associated with SCSI_IO determined by ioc
5316 * ioc_status - ioc supplied status info
5319 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5320 Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5324 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5325 MPI2_IOCSTATUS_MASK;
5326 u8 scsi_state = mpi_reply->SCSIState;
5327 u8 scsi_status = mpi_reply->SCSIStatus;
5328 char *desc_ioc_state = NULL;
5329 char *desc_scsi_status = NULL;
5330 char *desc_scsi_state = ioc->tmp_string;
5331 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5332 struct _sas_device *sas_device = NULL;
5333 struct _pcie_device *pcie_device = NULL;
5334 struct scsi_target *starget = scmd->device->sdev_target;
5335 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5336 char *device_str = NULL;
5340 if (ioc->hide_ir_msg)
5341 device_str = "WarpDrive";
5343 device_str = "volume";
5345 if (log_info == 0x31170000)
5348 switch (ioc_status) {
5349 case MPI2_IOCSTATUS_SUCCESS:
5350 desc_ioc_state = "success";
5352 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5353 desc_ioc_state = "invalid function";
5355 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5356 desc_ioc_state = "scsi recovered error";
5358 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5359 desc_ioc_state = "scsi invalid dev handle";
5361 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5362 desc_ioc_state = "scsi device not there";
5364 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5365 desc_ioc_state = "scsi data overrun";
5367 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5368 desc_ioc_state = "scsi data underrun";
5370 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5371 desc_ioc_state = "scsi io data error";
5373 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5374 desc_ioc_state = "scsi protocol error";
5376 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5377 desc_ioc_state = "scsi task terminated";
5379 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5380 desc_ioc_state = "scsi residual mismatch";
5382 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5383 desc_ioc_state = "scsi task mgmt failed";
5385 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5386 desc_ioc_state = "scsi ioc terminated";
5388 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5389 desc_ioc_state = "scsi ext terminated";
5391 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5392 desc_ioc_state = "eedp guard error";
5394 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5395 desc_ioc_state = "eedp ref tag error";
5397 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5398 desc_ioc_state = "eedp app tag error";
5400 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5401 desc_ioc_state = "insufficient power";
5404 desc_ioc_state = "unknown";
5408 switch (scsi_status) {
5409 case MPI2_SCSI_STATUS_GOOD:
5410 desc_scsi_status = "good";
5412 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5413 desc_scsi_status = "check condition";
5415 case MPI2_SCSI_STATUS_CONDITION_MET:
5416 desc_scsi_status = "condition met";
5418 case MPI2_SCSI_STATUS_BUSY:
5419 desc_scsi_status = "busy";
5421 case MPI2_SCSI_STATUS_INTERMEDIATE:
5422 desc_scsi_status = "intermediate";
5424 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5425 desc_scsi_status = "intermediate condmet";
5427 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5428 desc_scsi_status = "reservation conflict";
5430 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5431 desc_scsi_status = "command terminated";
5433 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5434 desc_scsi_status = "task set full";
5436 case MPI2_SCSI_STATUS_ACA_ACTIVE:
5437 desc_scsi_status = "aca active";
5439 case MPI2_SCSI_STATUS_TASK_ABORTED:
5440 desc_scsi_status = "task aborted";
5443 desc_scsi_status = "unknown";
5447 desc_scsi_state[0] = '\0';
5449 desc_scsi_state = " ";
5450 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5451 strcat(desc_scsi_state, "response info ");
5452 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5453 strcat(desc_scsi_state, "state terminated ");
5454 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5455 strcat(desc_scsi_state, "no status ");
5456 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5457 strcat(desc_scsi_state, "autosense failed ");
5458 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5459 strcat(desc_scsi_state, "autosense valid ");
5461 scsi_print_command(scmd);
5463 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5464 ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5465 device_str, (u64)priv_target->sas_address);
5466 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5467 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5469 ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5470 (u64)pcie_device->wwid, pcie_device->port_num);
5471 if (pcie_device->enclosure_handle != 0)
5472 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5473 (u64)pcie_device->enclosure_logical_id,
5475 if (pcie_device->connector_name[0])
5476 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5477 pcie_device->enclosure_level,
5478 pcie_device->connector_name);
5479 pcie_device_put(pcie_device);
5482 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5484 ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5485 (u64)sas_device->sas_address, sas_device->phy);
5487 _scsih_display_enclosure_chassis_info(ioc, sas_device,
5490 sas_device_put(sas_device);
5494 ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5495 le16_to_cpu(mpi_reply->DevHandle),
5496 desc_ioc_state, ioc_status, smid);
5497 ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5498 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5499 ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5500 le16_to_cpu(mpi_reply->TaskTag),
5501 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5502 ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5503 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5505 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5506 struct sense_info data;
5507 _scsih_normalize_sense(scmd->sense_buffer, &data);
5508 ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5509 data.skey, data.asc, data.ascq,
5510 le32_to_cpu(mpi_reply->SenseCount));
5512 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5513 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5514 response_bytes = (u8 *)&response_info;
5515 _scsih_response_code(ioc, response_bytes[0]);
5520 * _scsih_turn_on_pfa_led - illuminate PFA LED
5521 * @ioc: per adapter object
5522 * @handle: device handle
5526 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5528 Mpi2SepReply_t mpi_reply;
5529 Mpi2SepRequest_t mpi_request;
5530 struct _sas_device *sas_device;
5532 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5536 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5537 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5538 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5539 mpi_request.SlotStatus =
5540 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5541 mpi_request.DevHandle = cpu_to_le16(handle);
5542 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5543 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5544 &mpi_request)) != 0) {
5545 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5546 __FILE__, __LINE__, __func__);
5549 sas_device->pfa_led_on = 1;
5551 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5553 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5554 le16_to_cpu(mpi_reply.IOCStatus),
5555 le32_to_cpu(mpi_reply.IOCLogInfo)));
5559 sas_device_put(sas_device);
5563 * _scsih_turn_off_pfa_led - turn off Fault LED
5564 * @ioc: per adapter object
5565 * @sas_device: sas device whose PFA LED has to turned off
5569 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5570 struct _sas_device *sas_device)
5572 Mpi2SepReply_t mpi_reply;
5573 Mpi2SepRequest_t mpi_request;
5575 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5576 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5577 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5578 mpi_request.SlotStatus = 0;
5579 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5580 mpi_request.DevHandle = 0;
5581 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5582 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5583 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5584 &mpi_request)) != 0) {
5585 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5586 __FILE__, __LINE__, __func__);
5590 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5592 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5593 le16_to_cpu(mpi_reply.IOCStatus),
5594 le32_to_cpu(mpi_reply.IOCLogInfo)));
5600 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5601 * @ioc: per adapter object
5602 * @handle: device handle
5603 * Context: interrupt.
5606 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5608 struct fw_event_work *fw_event;
5610 fw_event = alloc_fw_event_work(0);
5613 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5614 fw_event->device_handle = handle;
5615 fw_event->ioc = ioc;
5616 _scsih_fw_event_add(ioc, fw_event);
5617 fw_event_work_put(fw_event);
5621 * _scsih_smart_predicted_fault - process smart errors
5622 * @ioc: per adapter object
5623 * @handle: device handle
5624 * Context: interrupt.
5627 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5629 struct scsi_target *starget;
5630 struct MPT3SAS_TARGET *sas_target_priv_data;
5631 Mpi2EventNotificationReply_t *event_reply;
5632 Mpi2EventDataSasDeviceStatusChange_t *event_data;
5633 struct _sas_device *sas_device;
5635 unsigned long flags;
5637 /* only handle non-raid devices */
5638 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5639 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5643 starget = sas_device->starget;
5644 sas_target_priv_data = starget->hostdata;
5646 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5647 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5650 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5652 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5654 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5655 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5657 /* insert into event log */
5658 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5659 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5660 event_reply = kzalloc(sz, GFP_ATOMIC);
5662 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5663 __FILE__, __LINE__, __func__);
5667 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5668 event_reply->Event =
5669 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5670 event_reply->MsgLength = sz/4;
5671 event_reply->EventDataLength =
5672 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5673 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5674 event_reply->EventData;
5675 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5676 event_data->ASC = 0x5D;
5677 event_data->DevHandle = cpu_to_le16(handle);
5678 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5679 mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5683 sas_device_put(sas_device);
5687 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5692 * _scsih_io_done - scsi request callback
5693 * @ioc: per adapter object
5694 * @smid: system request message index
5695 * @msix_index: MSIX table index supplied by the OS
5696 * @reply: reply message frame(lower 32bit addr)
5698 * Callback handler when using _scsih_qcmd.
5700 * Return: 1 meaning mf should be freed from _base_interrupt
5701 * 0 means the mf is freed from this function.
5704 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5706 Mpi25SCSIIORequest_t *mpi_request;
5707 Mpi2SCSIIOReply_t *mpi_reply;
5708 struct scsi_cmnd *scmd;
5709 struct scsiio_tracker *st;
5715 struct MPT3SAS_DEVICE *sas_device_priv_data;
5716 u32 response_code = 0;
5718 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5720 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5724 _scsih_set_satl_pending(scmd, false);
5726 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5728 if (mpi_reply == NULL) {
5729 scmd->result = DID_OK << 16;
5733 sas_device_priv_data = scmd->device->hostdata;
5734 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5735 sas_device_priv_data->sas_target->deleted) {
5736 scmd->result = DID_NO_CONNECT << 16;
5739 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5742 * WARPDRIVE: If direct_io is set then it is directIO,
5743 * the failed direct I/O should be redirected to volume
5745 st = scsi_cmd_priv(scmd);
5746 if (st->direct_io &&
5747 ((ioc_status & MPI2_IOCSTATUS_MASK)
5748 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5751 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5752 mpi_request->DevHandle =
5753 cpu_to_le16(sas_device_priv_data->sas_target->handle);
5754 ioc->put_smid_scsi_io(ioc, smid,
5755 sas_device_priv_data->sas_target->handle);
5758 /* turning off TLR */
5759 scsi_state = mpi_reply->SCSIState;
5760 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5762 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5763 if (!sas_device_priv_data->tlr_snoop_check) {
5764 sas_device_priv_data->tlr_snoop_check++;
5765 if ((!ioc->is_warpdrive &&
5766 !scsih_is_raid(&scmd->device->sdev_gendev) &&
5767 !scsih_is_nvme(&scmd->device->sdev_gendev))
5768 && sas_is_tlr_enabled(scmd->device) &&
5769 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5770 sas_disable_tlr(scmd->device);
5771 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5775 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5776 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5777 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5778 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5781 ioc_status &= MPI2_IOCSTATUS_MASK;
5782 scsi_status = mpi_reply->SCSIStatus;
5784 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5785 (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5786 scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5787 scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5788 ioc_status = MPI2_IOCSTATUS_SUCCESS;
5791 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5792 struct sense_info data;
5793 const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5795 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5796 le32_to_cpu(mpi_reply->SenseCount));
5797 memcpy(scmd->sense_buffer, sense_data, sz);
5798 _scsih_normalize_sense(scmd->sense_buffer, &data);
5799 /* failure prediction threshold exceeded */
5800 if (data.asc == 0x5D)
5801 _scsih_smart_predicted_fault(ioc,
5802 le16_to_cpu(mpi_reply->DevHandle));
5803 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5805 if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5806 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5807 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5808 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5809 _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5811 switch (ioc_status) {
5812 case MPI2_IOCSTATUS_BUSY:
5813 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5814 scmd->result = SAM_STAT_BUSY;
5817 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5818 scmd->result = DID_NO_CONNECT << 16;
5821 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5822 if (sas_device_priv_data->block) {
5823 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5826 if (log_info == 0x31110630) {
5827 if (scmd->retries > 2) {
5828 scmd->result = DID_NO_CONNECT << 16;
5829 scsi_device_set_state(scmd->device,
5832 scmd->result = DID_SOFT_ERROR << 16;
5833 scmd->device->expecting_cc_ua = 1;
5836 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5837 scmd->result = DID_RESET << 16;
5839 } else if ((scmd->device->channel == RAID_CHANNEL) &&
5840 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5841 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5842 scmd->result = DID_RESET << 16;
5845 scmd->result = DID_SOFT_ERROR << 16;
5847 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5848 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5849 scmd->result = DID_RESET << 16;
5852 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5853 if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5854 scmd->result = DID_SOFT_ERROR << 16;
5856 scmd->result = (DID_OK << 16) | scsi_status;
5859 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5860 scmd->result = (DID_OK << 16) | scsi_status;
5862 if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5865 if (xfer_cnt < scmd->underflow) {
5866 if (scsi_status == SAM_STAT_BUSY)
5867 scmd->result = SAM_STAT_BUSY;
5869 scmd->result = DID_SOFT_ERROR << 16;
5870 } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5871 MPI2_SCSI_STATE_NO_SCSI_STATUS))
5872 scmd->result = DID_SOFT_ERROR << 16;
5873 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5874 scmd->result = DID_RESET << 16;
5875 else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5876 mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5877 mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5878 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
5883 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5884 scsi_set_resid(scmd, 0);
5886 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5887 case MPI2_IOCSTATUS_SUCCESS:
5888 scmd->result = (DID_OK << 16) | scsi_status;
5889 if (response_code ==
5890 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5891 (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5892 MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5893 scmd->result = DID_SOFT_ERROR << 16;
5894 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5895 scmd->result = DID_RESET << 16;
5898 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5899 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5900 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5901 _scsih_eedp_error_handling(scmd, ioc_status);
5904 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5905 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5906 case MPI2_IOCSTATUS_INVALID_SGL:
5907 case MPI2_IOCSTATUS_INTERNAL_ERROR:
5908 case MPI2_IOCSTATUS_INVALID_FIELD:
5909 case MPI2_IOCSTATUS_INVALID_STATE:
5910 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5911 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5912 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5914 scmd->result = DID_SOFT_ERROR << 16;
5919 if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5920 _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5924 scsi_dma_unmap(scmd);
5925 mpt3sas_base_free_smid(ioc, smid);
5931 * _scsih_update_vphys_after_reset - update the Port's
5932 * vphys_list after reset
5933 * @ioc: per adapter object
5938 _scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
5942 Mpi2ConfigReply_t mpi_reply;
5943 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5944 u16 attached_handle;
5945 u64 attached_sas_addr;
5946 u8 found = 0, port_id;
5947 Mpi2SasPhyPage0_t phy_pg0;
5948 struct hba_port *port, *port_next, *mport;
5949 struct virtual_phy *vphy, *vphy_next;
5950 struct _sas_device *sas_device;
5953 * Mark all the vphys objects as dirty.
5955 list_for_each_entry_safe(port, port_next,
5956 &ioc->port_table_list, list) {
5957 if (!port->vphys_mask)
5959 list_for_each_entry_safe(vphy, vphy_next,
5960 &port->vphys_list, list) {
5961 vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
5966 * Read SASIOUnitPage0 to get each HBA Phy's data.
5968 sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
5969 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5970 if (!sas_iounit_pg0) {
5971 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5972 __FILE__, __LINE__, __func__);
5975 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5976 sas_iounit_pg0, sz)) != 0)
5978 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5979 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5982 * Loop over each HBA Phy.
5984 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
5986 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
5988 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
5989 MPI2_SAS_NEG_LINK_RATE_1_5)
5992 * Check whether Phy is connected to SEP device or not,
5993 * if it is SEP device then read the Phy's SASPHYPage0 data to
5994 * determine whether Phy is a virtual Phy or not. if it is
5995 * virtual phy then it is conformed that the attached remote
5996 * device is a HBA's vSES device.
5999 sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
6000 MPI2_SAS_DEVICE_INFO_SEP))
6003 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6005 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6006 __FILE__, __LINE__, __func__);
6010 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6011 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6014 * Get the vSES device's SAS Address.
6016 attached_handle = le16_to_cpu(
6017 sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6018 if (_scsih_get_sas_address(ioc, attached_handle,
6019 &attached_sas_addr) != 0) {
6020 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6021 __FILE__, __LINE__, __func__);
6026 port = port_next = NULL;
6028 * Loop over each virtual_phy object from
6029 * each port's vphys_list.
6031 list_for_each_entry_safe(port,
6032 port_next, &ioc->port_table_list, list) {
6033 if (!port->vphys_mask)
6035 list_for_each_entry_safe(vphy, vphy_next,
6036 &port->vphys_list, list) {
6038 * Continue with next virtual_phy object
6039 * if the object is not marked as dirty.
6041 if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
6045 * Continue with next virtual_phy object
6046 * if the object's SAS Address is not equals
6047 * to current Phy's vSES device SAS Address.
6049 if (vphy->sas_address != attached_sas_addr)
6052 * Enable current Phy number bit in object's
6055 if (!(vphy->phy_mask & (1 << i)))
6056 vphy->phy_mask = (1 << i);
6058 * Get hba_port object from hba_port table
6059 * corresponding to current phy's Port ID.
6060 * if there is no hba_port object corresponding
6061 * to Phy's Port ID then create a new hba_port
6062 * object & add to hba_port table.
6064 port_id = sas_iounit_pg0->PhyData[i].Port;
6065 mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
6068 sizeof(struct hba_port), GFP_KERNEL);
6071 mport->port_id = port_id;
6073 "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
6074 __func__, mport, mport->port_id);
6075 list_add_tail(&mport->list,
6076 &ioc->port_table_list);
6079 * If mport & port pointers are not pointing to
6080 * same hba_port object then it means that vSES
6081 * device's Port ID got changed after reset and
6082 * hence move current virtual_phy object from
6083 * port's vphys_list to mport's vphys_list.
6085 if (port != mport) {
6086 if (!mport->vphys_mask)
6088 &mport->vphys_list);
6089 mport->vphys_mask |= (1 << i);
6090 port->vphys_mask &= ~(1 << i);
6091 list_move(&vphy->list,
6092 &mport->vphys_list);
6093 sas_device = mpt3sas_get_sdev_by_addr(
6094 ioc, attached_sas_addr, port);
6096 sas_device->port = mport;
6099 * Earlier while updating the hba_port table,
6100 * it is determined that there is no other
6101 * direct attached device with mport's Port ID,
6102 * Hence mport was marked as dirty. Only vSES
6103 * device has this Port ID, so unmark the mport
6106 if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
6107 mport->sas_address = 0;
6108 mport->phy_mask = 0;
6110 ~HBA_PORT_FLAG_DIRTY_PORT;
6113 * Unmark current virtual_phy object as dirty.
6115 vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
6124 kfree(sas_iounit_pg0);
6128 * _scsih_get_port_table_after_reset - Construct temporary port table
6129 * @ioc: per adapter object
6130 * @port_table: address where port table needs to be constructed
6132 * return number of HBA port entries available after reset.
6135 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6136 struct hba_port *port_table)
6140 Mpi2ConfigReply_t mpi_reply;
6141 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6142 u16 attached_handle;
6143 u64 attached_sas_addr;
6144 u8 found = 0, port_count = 0, port_id;
6146 sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
6147 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6148 if (!sas_iounit_pg0) {
6149 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6150 __FILE__, __LINE__, __func__);
6154 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6155 sas_iounit_pg0, sz)) != 0)
6157 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6158 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6160 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
6162 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6163 MPI2_SAS_NEG_LINK_RATE_1_5)
6166 le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6167 if (_scsih_get_sas_address(
6168 ioc, attached_handle, &attached_sas_addr) != 0) {
6169 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6170 __FILE__, __LINE__, __func__);
6174 for (j = 0; j < port_count; j++) {
6175 port_id = sas_iounit_pg0->PhyData[i].Port;
6176 if (port_table[j].port_id == port_id &&
6177 port_table[j].sas_address == attached_sas_addr) {
6178 port_table[j].phy_mask |= (1 << i);
6187 port_id = sas_iounit_pg0->PhyData[i].Port;
6188 port_table[port_count].port_id = port_id;
6189 port_table[port_count].phy_mask = (1 << i);
6190 port_table[port_count].sas_address = attached_sas_addr;
6194 kfree(sas_iounit_pg0);
6198 enum hba_port_matched_codes {
6200 MATCHED_WITH_ADDR_AND_PHYMASK,
6201 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
6202 MATCHED_WITH_ADDR_AND_SUBPHYMASK,
6207 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
6208 * from HBA port table
6209 * @ioc: per adapter object
6210 * @port_entry: hba port entry from temporary port table which needs to be
6211 * searched for matched entry in the HBA port table
6212 * @matched_port_entry: save matched hba port entry here
6213 * @count: count of matched entries
6215 * return type of matched entry found.
6217 static enum hba_port_matched_codes
6218 _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
6219 struct hba_port *port_entry,
6220 struct hba_port **matched_port_entry, int *count)
6222 struct hba_port *port_table_entry, *matched_port = NULL;
6223 enum hba_port_matched_codes matched_code = NOT_MATCHED;
6225 *matched_port_entry = NULL;
6227 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6228 if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
6231 if ((port_table_entry->sas_address == port_entry->sas_address)
6232 && (port_table_entry->phy_mask == port_entry->phy_mask)) {
6233 matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
6234 matched_port = port_table_entry;
6238 if ((port_table_entry->sas_address == port_entry->sas_address)
6239 && (port_table_entry->phy_mask & port_entry->phy_mask)
6240 && (port_table_entry->port_id == port_entry->port_id)) {
6241 matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
6242 matched_port = port_table_entry;
6246 if ((port_table_entry->sas_address == port_entry->sas_address)
6247 && (port_table_entry->phy_mask & port_entry->phy_mask)) {
6249 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6251 matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
6252 matched_port = port_table_entry;
6256 if (port_table_entry->sas_address == port_entry->sas_address) {
6258 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6260 if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
6262 matched_code = MATCHED_WITH_ADDR;
6263 matched_port = port_table_entry;
6268 *matched_port_entry = matched_port;
6269 if (matched_code == MATCHED_WITH_ADDR)
6271 return matched_code;
6275 * _scsih_del_phy_part_of_anther_port - remove phy if it
6276 * is a part of anther port
6277 *@ioc: per adapter object
6278 *@port_table: port table after reset
6279 *@index: hba port entry index
6280 *@port_count: number of ports available after host reset
6281 *@offset: HBA phy bit offset
6285 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6286 struct hba_port *port_table,
6287 int index, u8 port_count, int offset)
6289 struct _sas_node *sas_node = &ioc->sas_hba;
6292 for (i = 0; i < port_count; i++) {
6296 if (port_table[i].phy_mask & (1 << offset)) {
6297 mpt3sas_transport_del_phy_from_an_existing_port(
6298 ioc, sas_node, &sas_node->phy[offset]);
6304 port_table[index].phy_mask |= (1 << offset);
6308 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6310 *@ioc: per adapter object
6311 *@hba_port_entry: hba port table entry
6312 *@port_table: temporary port table
6313 *@index: hba port entry index
6314 *@port_count: number of ports available after host reset
6318 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6319 struct hba_port *hba_port_entry, struct hba_port *port_table,
6320 int index, int port_count)
6322 u32 phy_mask, offset = 0;
6323 struct _sas_node *sas_node = &ioc->sas_hba;
6325 phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6327 for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6328 if (phy_mask & (1 << offset)) {
6329 if (!(port_table[index].phy_mask & (1 << offset))) {
6330 _scsih_del_phy_part_of_anther_port(
6331 ioc, port_table, index, port_count,
6335 if (sas_node->phy[offset].phy_belongs_to_port)
6336 mpt3sas_transport_del_phy_from_an_existing_port(
6337 ioc, sas_node, &sas_node->phy[offset]);
6338 mpt3sas_transport_add_phy_to_an_existing_port(
6339 ioc, sas_node, &sas_node->phy[offset],
6340 hba_port_entry->sas_address,
6347 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6348 * @ioc: per adapter object
6353 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6355 struct hba_port *port, *port_next;
6356 struct virtual_phy *vphy, *vphy_next;
6358 list_for_each_entry_safe(port, port_next,
6359 &ioc->port_table_list, list) {
6360 if (!port->vphys_mask)
6362 list_for_each_entry_safe(vphy, vphy_next,
6363 &port->vphys_list, list) {
6364 if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6365 drsprintk(ioc, ioc_info(ioc,
6366 "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6367 vphy, port->port_id,
6369 port->vphys_mask &= ~vphy->phy_mask;
6370 list_del(&vphy->list);
6374 if (!port->vphys_mask && !port->sas_address)
6375 port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6380 * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6382 *@ioc: per adapter object
6386 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6388 struct hba_port *port, *port_next;
6390 list_for_each_entry_safe(port, port_next,
6391 &ioc->port_table_list, list) {
6392 if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6393 port->flags & HBA_PORT_FLAG_NEW_PORT)
6396 drsprintk(ioc, ioc_info(ioc,
6397 "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6398 port, port->port_id, port->phy_mask));
6399 list_del(&port->list);
6405 * _scsih_sas_port_refresh - Update HBA port table after host reset
6406 * @ioc: per adapter object
6409 _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6412 struct hba_port *port_table;
6413 struct hba_port *port_table_entry;
6414 struct hba_port *port_entry = NULL;
6415 int i, j, count = 0, lcount = 0;
6420 drsprintk(ioc, ioc_info(ioc,
6421 "updating ports for sas_host(0x%016llx)\n",
6422 (unsigned long long)ioc->sas_hba.sas_address));
6424 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6426 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6427 __FILE__, __LINE__, __func__);
6431 if (num_phys > ioc->sas_hba.nr_phys_allocated) {
6432 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6433 __FILE__, __LINE__, __func__);
6436 ioc->sas_hba.num_phys = num_phys;
6438 port_table = kcalloc(ioc->sas_hba.num_phys,
6439 sizeof(struct hba_port), GFP_KERNEL);
6443 port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6447 drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6448 for (j = 0; j < port_count; j++)
6449 drsprintk(ioc, ioc_info(ioc,
6450 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6451 port_table[j].port_id,
6452 port_table[j].phy_mask, port_table[j].sas_address));
6454 list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6455 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6457 drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6458 port_table_entry = NULL;
6459 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6460 drsprintk(ioc, ioc_info(ioc,
6461 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6462 port_table_entry->port_id,
6463 port_table_entry->phy_mask,
6464 port_table_entry->sas_address));
6467 for (j = 0; j < port_count; j++) {
6468 ret = _scsih_look_and_get_matched_port_entry(ioc,
6469 &port_table[j], &port_entry, &count);
6471 drsprintk(ioc, ioc_info(ioc,
6472 "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6473 port_table[j].sas_address,
6474 port_table[j].port_id));
6479 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6480 case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6481 _scsih_add_or_del_phys_from_existing_port(ioc,
6482 port_entry, port_table, j, port_count);
6484 case MATCHED_WITH_ADDR:
6485 sas_addr = port_table[j].sas_address;
6486 for (i = 0; i < port_count; i++) {
6487 if (port_table[i].sas_address == sas_addr)
6491 if (count > 1 || lcount > 1)
6494 _scsih_add_or_del_phys_from_existing_port(ioc,
6495 port_entry, port_table, j, port_count);
6501 if (port_entry->port_id != port_table[j].port_id)
6502 port_entry->port_id = port_table[j].port_id;
6503 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6504 port_entry->phy_mask = port_table[j].phy_mask;
6507 port_table_entry = NULL;
6511 * _scsih_alloc_vphy - allocate virtual_phy object
6512 * @ioc: per adapter object
6513 * @port_id: Port ID number
6514 * @phy_num: HBA Phy number
6516 * Returns allocated virtual_phy object.
6518 static struct virtual_phy *
6519 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6521 struct virtual_phy *vphy;
6522 struct hba_port *port;
6524 port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6528 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6530 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6534 if (!port->vphys_mask)
6535 INIT_LIST_HEAD(&port->vphys_list);
6538 * Enable bit corresponding to HBA phy number on its
6539 * parent hba_port object's vphys_mask field.
6541 port->vphys_mask |= (1 << phy_num);
6542 vphy->phy_mask |= (1 << phy_num);
6544 list_add_tail(&vphy->list, &port->vphys_list);
6547 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6548 vphy, port->port_id, phy_num);
6554 * _scsih_sas_host_refresh - refreshing sas host object contents
6555 * @ioc: per adapter object
6558 * During port enable, fw will send topology events for every device. Its
6559 * possible that the handles may change from the previous setting, so this
6560 * code keeping handles updating if changed.
6563 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
6568 Mpi2ConfigReply_t mpi_reply;
6569 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6570 u16 attached_handle;
6571 u8 link_rate, port_id;
6572 struct hba_port *port;
6573 Mpi2SasPhyPage0_t phy_pg0;
6576 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
6577 (u64)ioc->sas_hba.sas_address));
6579 sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
6580 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6581 if (!sas_iounit_pg0) {
6582 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6583 __FILE__, __LINE__, __func__);
6587 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6588 sas_iounit_pg0, sz)) != 0)
6590 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6591 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6593 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6594 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
6596 ioc->sas_hba.handle = le16_to_cpu(
6597 sas_iounit_pg0->PhyData[0].ControllerDevHandle);
6598 port_id = sas_iounit_pg0->PhyData[i].Port;
6599 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6600 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6604 port->port_id = port_id;
6606 "hba_port entry: %p, port: %d is added to hba_port list\n",
6607 port, port->port_id);
6608 if (ioc->shost_recovery)
6609 port->flags = HBA_PORT_FLAG_NEW_PORT;
6610 list_add_tail(&port->list, &ioc->port_table_list);
6613 * Check whether current Phy belongs to HBA vSES device or not.
6615 if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
6616 MPI2_SAS_DEVICE_INFO_SEP &&
6617 (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
6618 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6621 "failure at %s:%d/%s()!\n",
6622 __FILE__, __LINE__, __func__);
6625 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6626 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6629 * Allocate a virtual_phy object for vSES device, if
6630 * this vSES device is hot added.
6632 if (!_scsih_alloc_vphy(ioc, port_id, i))
6634 ioc->sas_hba.phy[i].hba_vphy = 1;
6638 * Add new HBA phys to STL if these new phys got added as part
6639 * of HBA Firmware upgrade/downgrade operation.
6641 if (!ioc->sas_hba.phy[i].phy) {
6642 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6644 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6645 __FILE__, __LINE__, __func__);
6648 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6649 MPI2_IOCSTATUS_MASK;
6650 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6651 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6652 __FILE__, __LINE__, __func__);
6655 ioc->sas_hba.phy[i].phy_id = i;
6656 mpt3sas_transport_add_host_phy(ioc,
6657 &ioc->sas_hba.phy[i], phy_pg0,
6658 ioc->sas_hba.parent_dev);
6661 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6662 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
6664 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6665 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
6666 ioc->sas_hba.phy[i].port =
6667 mpt3sas_get_port_by_id(ioc, port_id, 0);
6668 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
6669 attached_handle, i, link_rate,
6670 ioc->sas_hba.phy[i].port);
6673 * Clear the phy details if this phy got disabled as part of
6674 * HBA Firmware upgrade/downgrade operation.
6676 for (i = ioc->sas_hba.num_phys;
6677 i < ioc->sas_hba.nr_phys_allocated; i++) {
6678 if (ioc->sas_hba.phy[i].phy &&
6679 ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
6680 SAS_LINK_RATE_1_5_GBPS)
6681 mpt3sas_transport_update_links(ioc,
6682 ioc->sas_hba.sas_address, 0, i,
6683 MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
6686 kfree(sas_iounit_pg0);
6690 * _scsih_sas_host_add - create sas host object
6691 * @ioc: per adapter object
6693 * Creating host side data object, stored in ioc->sas_hba
6696 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
6699 Mpi2ConfigReply_t mpi_reply;
6700 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6701 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
6702 Mpi2SasPhyPage0_t phy_pg0;
6703 Mpi2SasDevicePage0_t sas_device_pg0;
6704 Mpi2SasEnclosurePage0_t enclosure_pg0;
6707 u8 device_missing_delay;
6708 u8 num_phys, port_id;
6709 struct hba_port *port;
6711 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6713 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6714 __FILE__, __LINE__, __func__);
6718 ioc->sas_hba.nr_phys_allocated = max_t(u8,
6719 MPT_MAX_HBA_NUM_PHYS, num_phys);
6720 ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
6721 sizeof(struct _sas_phy), GFP_KERNEL);
6722 if (!ioc->sas_hba.phy) {
6723 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6724 __FILE__, __LINE__, __func__);
6727 ioc->sas_hba.num_phys = num_phys;
6729 /* sas_iounit page 0 */
6730 sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
6731 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6732 if (!sas_iounit_pg0) {
6733 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6734 __FILE__, __LINE__, __func__);
6737 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6738 sas_iounit_pg0, sz))) {
6739 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6740 __FILE__, __LINE__, __func__);
6743 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6744 MPI2_IOCSTATUS_MASK;
6745 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6746 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6747 __FILE__, __LINE__, __func__);
6751 /* sas_iounit page 1 */
6752 sz = struct_size(sas_iounit_pg1, PhyData, ioc->sas_hba.num_phys);
6753 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
6754 if (!sas_iounit_pg1) {
6755 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6756 __FILE__, __LINE__, __func__);
6759 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
6760 sas_iounit_pg1, sz))) {
6761 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6762 __FILE__, __LINE__, __func__);
6765 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6766 MPI2_IOCSTATUS_MASK;
6767 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6768 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6769 __FILE__, __LINE__, __func__);
6773 ioc->io_missing_delay =
6774 sas_iounit_pg1->IODeviceMissingDelay;
6775 device_missing_delay =
6776 sas_iounit_pg1->ReportDeviceMissingDelay;
6777 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
6778 ioc->device_missing_delay = (device_missing_delay &
6779 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
6781 ioc->device_missing_delay = device_missing_delay &
6782 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
6784 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
6785 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6786 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6788 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6789 __FILE__, __LINE__, __func__);
6792 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6793 MPI2_IOCSTATUS_MASK;
6794 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6795 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6796 __FILE__, __LINE__, __func__);
6801 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
6802 PhyData[0].ControllerDevHandle);
6804 port_id = sas_iounit_pg0->PhyData[i].Port;
6805 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6806 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6810 port->port_id = port_id;
6812 "hba_port entry: %p, port: %d is added to hba_port list\n",
6813 port, port->port_id);
6814 list_add_tail(&port->list,
6815 &ioc->port_table_list);
6819 * Check whether current Phy belongs to HBA vSES device or not.
6821 if ((le32_to_cpu(phy_pg0.PhyInfo) &
6822 MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
6823 (phy_pg0.NegotiatedLinkRate >> 4) >=
6824 MPI2_SAS_NEG_LINK_RATE_1_5) {
6826 * Allocate a virtual_phy object for vSES device.
6828 if (!_scsih_alloc_vphy(ioc, port_id, i))
6830 ioc->sas_hba.phy[i].hba_vphy = 1;
6833 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6834 ioc->sas_hba.phy[i].phy_id = i;
6835 ioc->sas_hba.phy[i].port =
6836 mpt3sas_get_port_by_id(ioc, port_id, 0);
6837 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
6838 phy_pg0, ioc->sas_hba.parent_dev);
6840 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6841 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
6842 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6843 __FILE__, __LINE__, __func__);
6846 ioc->sas_hba.enclosure_handle =
6847 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6848 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6849 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6850 ioc->sas_hba.handle,
6851 (u64)ioc->sas_hba.sas_address,
6852 ioc->sas_hba.num_phys);
6854 if (ioc->sas_hba.enclosure_handle) {
6855 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
6856 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
6857 ioc->sas_hba.enclosure_handle)))
6858 ioc->sas_hba.enclosure_logical_id =
6859 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
6863 kfree(sas_iounit_pg1);
6864 kfree(sas_iounit_pg0);
6868 * _scsih_expander_add - creating expander object
6869 * @ioc: per adapter object
6870 * @handle: expander handle
6872 * Creating expander object, stored in ioc->sas_expander_list.
6874 * Return: 0 for success, else error.
6877 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6879 struct _sas_node *sas_expander;
6880 struct _enclosure_node *enclosure_dev;
6881 Mpi2ConfigReply_t mpi_reply;
6882 Mpi2ExpanderPage0_t expander_pg0;
6883 Mpi2ExpanderPage1_t expander_pg1;
6886 u64 sas_address, sas_address_parent = 0;
6888 unsigned long flags;
6889 struct _sas_port *mpt3sas_port = NULL;
6897 if (ioc->shost_recovery || ioc->pci_error_recovery)
6900 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6901 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6902 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6903 __FILE__, __LINE__, __func__);
6907 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6908 MPI2_IOCSTATUS_MASK;
6909 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6910 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6911 __FILE__, __LINE__, __func__);
6915 /* handle out of order topology events */
6916 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6917 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6919 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6920 __FILE__, __LINE__, __func__);
6924 port_id = expander_pg0.PhysicalPort;
6925 if (sas_address_parent != ioc->sas_hba.sas_address) {
6926 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6927 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6929 mpt3sas_get_port_by_id(ioc, port_id, 0));
6930 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6931 if (!sas_expander) {
6932 rc = _scsih_expander_add(ioc, parent_handle);
6938 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6939 sas_address = le64_to_cpu(expander_pg0.SASAddress);
6940 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6941 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6942 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6947 sas_expander = kzalloc(sizeof(struct _sas_node),
6949 if (!sas_expander) {
6950 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6951 __FILE__, __LINE__, __func__);
6955 sas_expander->handle = handle;
6956 sas_expander->num_phys = expander_pg0.NumPhys;
6957 sas_expander->sas_address_parent = sas_address_parent;
6958 sas_expander->sas_address = sas_address;
6959 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6960 if (!sas_expander->port) {
6961 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6962 __FILE__, __LINE__, __func__);
6967 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6968 handle, parent_handle,
6969 (u64)sas_expander->sas_address, sas_expander->num_phys);
6971 if (!sas_expander->num_phys) {
6975 sas_expander->phy = kcalloc(sas_expander->num_phys,
6976 sizeof(struct _sas_phy), GFP_KERNEL);
6977 if (!sas_expander->phy) {
6978 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6979 __FILE__, __LINE__, __func__);
6984 INIT_LIST_HEAD(&sas_expander->sas_port_list);
6985 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6986 sas_address_parent, sas_expander->port);
6987 if (!mpt3sas_port) {
6988 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6989 __FILE__, __LINE__, __func__);
6993 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6994 sas_expander->rphy = mpt3sas_port->rphy;
6996 for (i = 0 ; i < sas_expander->num_phys ; i++) {
6997 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6998 &expander_pg1, i, handle))) {
6999 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7000 __FILE__, __LINE__, __func__);
7004 sas_expander->phy[i].handle = handle;
7005 sas_expander->phy[i].phy_id = i;
7006 sas_expander->phy[i].port =
7007 mpt3sas_get_port_by_id(ioc, port_id, 0);
7009 if ((mpt3sas_transport_add_expander_phy(ioc,
7010 &sas_expander->phy[i], expander_pg1,
7011 sas_expander->parent_dev))) {
7012 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7013 __FILE__, __LINE__, __func__);
7019 if (sas_expander->enclosure_handle) {
7021 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7022 sas_expander->enclosure_handle);
7024 sas_expander->enclosure_logical_id =
7025 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7028 _scsih_expander_node_add(ioc, sas_expander);
7034 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
7035 sas_address_parent, sas_expander->port);
7036 kfree(sas_expander);
7041 * mpt3sas_expander_remove - removing expander object
7042 * @ioc: per adapter object
7043 * @sas_address: expander sas_address
7044 * @port: hba port entry
7047 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7048 struct hba_port *port)
7050 struct _sas_node *sas_expander;
7051 unsigned long flags;
7053 if (ioc->shost_recovery)
7059 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7060 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
7062 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7064 _scsih_expander_node_remove(ioc, sas_expander);
7068 * _scsih_done - internal SCSI_IO callback handler.
7069 * @ioc: per adapter object
7070 * @smid: system request message index
7071 * @msix_index: MSIX table index supplied by the OS
7072 * @reply: reply message frame(lower 32bit addr)
7074 * Callback handler when sending internal generated SCSI_IO.
7075 * The callback index passed is `ioc->scsih_cb_idx`
7077 * Return: 1 meaning mf should be freed from _base_interrupt
7078 * 0 means the mf is freed from this function.
7081 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
7083 MPI2DefaultReply_t *mpi_reply;
7085 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7086 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
7088 if (ioc->scsih_cmds.smid != smid)
7090 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
7092 memcpy(ioc->scsih_cmds.reply, mpi_reply,
7093 mpi_reply->MsgLength*4);
7094 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7096 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7097 complete(&ioc->scsih_cmds.done);
7104 #define MPT3_MAX_LUNS (255)
7108 * _scsih_check_access_status - check access flags
7109 * @ioc: per adapter object
7110 * @sas_address: sas address
7111 * @handle: sas device handle
7112 * @access_status: errors returned during discovery of the device
7114 * Return: 0 for success, else failure
7117 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7118 u16 handle, u8 access_status)
7123 switch (access_status) {
7124 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7125 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7128 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7129 desc = "sata capability failed";
7131 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7132 desc = "sata affiliation conflict";
7134 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7135 desc = "route not addressable";
7137 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7138 desc = "smp error not addressable";
7140 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7141 desc = "device blocked";
7143 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7144 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7145 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7146 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7147 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7148 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7149 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7150 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7151 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7152 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7153 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7154 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7155 desc = "sata initialization failed";
7165 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7166 desc, (u64)sas_address, handle);
7171 * _scsih_check_device - checking device responsiveness
7172 * @ioc: per adapter object
7173 * @parent_sas_address: sas address of parent expander or sas host
7174 * @handle: attached device handle
7175 * @phy_number: phy number
7176 * @link_rate: new link rate
7179 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
7180 u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
7182 Mpi2ConfigReply_t mpi_reply;
7183 Mpi2SasDevicePage0_t sas_device_pg0;
7184 struct _sas_device *sas_device = NULL;
7185 struct _enclosure_node *enclosure_dev = NULL;
7187 unsigned long flags;
7189 struct scsi_target *starget;
7190 struct MPT3SAS_TARGET *sas_target_priv_data;
7192 struct hba_port *port;
7194 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7195 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
7198 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7199 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7202 /* wide port handling ~ we need only handle device once for the phy that
7203 * is matched in sas device page zero
7205 if (phy_number != sas_device_pg0.PhyNum)
7208 /* check if this is end device */
7209 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7210 if (!(_scsih_is_end_device(device_info)))
7213 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7214 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7215 port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
7218 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7224 if (unlikely(sas_device->handle != handle)) {
7225 starget = sas_device->starget;
7226 sas_target_priv_data = starget->hostdata;
7227 starget_printk(KERN_INFO, starget,
7228 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7229 sas_device->handle, handle);
7230 sas_target_priv_data->handle = handle;
7231 sas_device->handle = handle;
7232 if (le16_to_cpu(sas_device_pg0.Flags) &
7233 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7234 sas_device->enclosure_level =
7235 sas_device_pg0.EnclosureLevel;
7236 memcpy(sas_device->connector_name,
7237 sas_device_pg0.ConnectorName, 4);
7238 sas_device->connector_name[4] = '\0';
7240 sas_device->enclosure_level = 0;
7241 sas_device->connector_name[0] = '\0';
7244 sas_device->enclosure_handle =
7245 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7246 sas_device->is_chassis_slot_valid = 0;
7247 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
7248 sas_device->enclosure_handle);
7249 if (enclosure_dev) {
7250 sas_device->enclosure_logical_id =
7251 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7252 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7253 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7254 sas_device->is_chassis_slot_valid = 1;
7255 sas_device->chassis_slot =
7256 enclosure_dev->pg0.ChassisSlot;
7261 /* check if device is present */
7262 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7263 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7264 ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
7269 /* check if there were any issues with discovery */
7270 if (_scsih_check_access_status(ioc, sas_address, handle,
7271 sas_device_pg0.AccessStatus))
7274 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7275 _scsih_ublock_io_device(ioc, sas_address, port);
7278 sas_device_put(sas_device);
7282 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7284 sas_device_put(sas_device);
7288 * _scsih_add_device - creating sas device object
7289 * @ioc: per adapter object
7290 * @handle: sas device handle
7291 * @phy_num: phy number end device attached to
7292 * @is_pd: is this hidden raid component
7294 * Creating end device object, stored in ioc->sas_device_list.
7296 * Return: 0 for success, non-zero for failure.
7299 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7302 Mpi2ConfigReply_t mpi_reply;
7303 Mpi2SasDevicePage0_t sas_device_pg0;
7304 struct _sas_device *sas_device;
7305 struct _enclosure_node *enclosure_dev = NULL;
7311 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7312 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7313 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7314 __FILE__, __LINE__, __func__);
7318 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7319 MPI2_IOCSTATUS_MASK;
7320 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7321 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7322 __FILE__, __LINE__, __func__);
7326 /* check if this is end device */
7327 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7328 if (!(_scsih_is_end_device(device_info)))
7330 set_bit(handle, ioc->pend_os_device_add);
7331 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7333 /* check if device is present */
7334 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7335 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7336 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7341 /* check if there were any issues with discovery */
7342 if (_scsih_check_access_status(ioc, sas_address, handle,
7343 sas_device_pg0.AccessStatus))
7346 port_id = sas_device_pg0.PhysicalPort;
7347 sas_device = mpt3sas_get_sdev_by_addr(ioc,
7348 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7350 clear_bit(handle, ioc->pend_os_device_add);
7351 sas_device_put(sas_device);
7355 if (sas_device_pg0.EnclosureHandle) {
7357 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7358 le16_to_cpu(sas_device_pg0.EnclosureHandle));
7359 if (enclosure_dev == NULL)
7360 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7361 sas_device_pg0.EnclosureHandle);
7364 sas_device = kzalloc(sizeof(struct _sas_device),
7367 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7368 __FILE__, __LINE__, __func__);
7372 kref_init(&sas_device->refcount);
7373 sas_device->handle = handle;
7374 if (_scsih_get_sas_address(ioc,
7375 le16_to_cpu(sas_device_pg0.ParentDevHandle),
7376 &sas_device->sas_address_parent) != 0)
7377 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7378 __FILE__, __LINE__, __func__);
7379 sas_device->enclosure_handle =
7380 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7381 if (sas_device->enclosure_handle != 0)
7383 le16_to_cpu(sas_device_pg0.Slot);
7384 sas_device->device_info = device_info;
7385 sas_device->sas_address = sas_address;
7386 sas_device->phy = sas_device_pg0.PhyNum;
7387 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7388 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7389 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7390 if (!sas_device->port) {
7391 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7392 __FILE__, __LINE__, __func__);
7396 if (le16_to_cpu(sas_device_pg0.Flags)
7397 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7398 sas_device->enclosure_level =
7399 sas_device_pg0.EnclosureLevel;
7400 memcpy(sas_device->connector_name,
7401 sas_device_pg0.ConnectorName, 4);
7402 sas_device->connector_name[4] = '\0';
7404 sas_device->enclosure_level = 0;
7405 sas_device->connector_name[0] = '\0';
7407 /* get enclosure_logical_id & chassis_slot*/
7408 sas_device->is_chassis_slot_valid = 0;
7409 if (enclosure_dev) {
7410 sas_device->enclosure_logical_id =
7411 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7412 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7413 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7414 sas_device->is_chassis_slot_valid = 1;
7415 sas_device->chassis_slot =
7416 enclosure_dev->pg0.ChassisSlot;
7420 /* get device name */
7421 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
7422 sas_device->port_type = sas_device_pg0.MaxPortConnections;
7424 "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
7425 handle, sas_device->sas_address, sas_device->port_type);
7427 if (ioc->wait_for_discovery_to_complete)
7428 _scsih_sas_device_init_add(ioc, sas_device);
7430 _scsih_sas_device_add(ioc, sas_device);
7433 sas_device_put(sas_device);
7438 * _scsih_remove_device - removing sas device object
7439 * @ioc: per adapter object
7440 * @sas_device: the sas_device object
7443 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
7444 struct _sas_device *sas_device)
7446 struct MPT3SAS_TARGET *sas_target_priv_data;
7448 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
7449 (sas_device->pfa_led_on)) {
7450 _scsih_turn_off_pfa_led(ioc, sas_device);
7451 sas_device->pfa_led_on = 0;
7455 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
7457 sas_device->handle, (u64)sas_device->sas_address));
7459 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7462 if (sas_device->starget && sas_device->starget->hostdata) {
7463 sas_target_priv_data = sas_device->starget->hostdata;
7464 sas_target_priv_data->deleted = 1;
7465 _scsih_ublock_io_device(ioc, sas_device->sas_address,
7467 sas_target_priv_data->handle =
7468 MPT3SAS_INVALID_DEVICE_HANDLE;
7471 if (!ioc->hide_drives)
7472 mpt3sas_transport_port_remove(ioc,
7473 sas_device->sas_address,
7474 sas_device->sas_address_parent,
7477 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
7478 sas_device->handle, (u64)sas_device->sas_address);
7480 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
7483 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
7485 sas_device->handle, (u64)sas_device->sas_address));
7486 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7491 * _scsih_sas_topology_change_event_debug - debug for topology event
7492 * @ioc: per adapter object
7493 * @event_data: event data payload
7497 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7498 Mpi2EventDataSasTopologyChangeList_t *event_data)
7504 char *status_str = NULL;
7505 u8 link_rate, prev_link_rate;
7507 switch (event_data->ExpStatus) {
7508 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7511 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7512 status_str = "remove";
7514 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7516 status_str = "responding";
7518 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7519 status_str = "remove delay";
7522 status_str = "unknown status";
7525 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7526 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7527 "start_phy(%02d), count(%d)\n",
7528 le16_to_cpu(event_data->ExpanderDevHandle),
7529 le16_to_cpu(event_data->EnclosureHandle),
7530 event_data->StartPhyNum, event_data->NumEntries);
7531 for (i = 0; i < event_data->NumEntries; i++) {
7532 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7535 phy_number = event_data->StartPhyNum + i;
7536 reason_code = event_data->PHY[i].PhyStatus &
7537 MPI2_EVENT_SAS_TOPO_RC_MASK;
7538 switch (reason_code) {
7539 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7540 status_str = "target add";
7542 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7543 status_str = "target remove";
7545 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7546 status_str = "delay target remove";
7548 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7549 status_str = "link rate change";
7551 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7552 status_str = "target responding";
7555 status_str = "unknown";
7558 link_rate = event_data->PHY[i].LinkRate >> 4;
7559 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7560 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7561 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7562 handle, status_str, link_rate, prev_link_rate);
7568 * _scsih_sas_topology_change_event - handle topology changes
7569 * @ioc: per adapter object
7570 * @fw_event: The fw_event_work object
7575 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7576 struct fw_event_work *fw_event)
7579 u16 parent_handle, handle;
7581 u8 phy_number, max_phys;
7582 struct _sas_node *sas_expander;
7584 unsigned long flags;
7585 u8 link_rate, prev_link_rate;
7586 struct hba_port *port;
7587 Mpi2EventDataSasTopologyChangeList_t *event_data =
7588 (Mpi2EventDataSasTopologyChangeList_t *)
7589 fw_event->event_data;
7591 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7592 _scsih_sas_topology_change_event_debug(ioc, event_data);
7594 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
7597 if (!ioc->sas_hba.num_phys)
7598 _scsih_sas_host_add(ioc);
7600 _scsih_sas_host_refresh(ioc);
7602 if (fw_event->ignore) {
7603 dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
7607 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
7608 port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
7610 /* handle expander add */
7611 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
7612 if (_scsih_expander_add(ioc, parent_handle) != 0)
7615 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7616 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
7619 sas_address = sas_expander->sas_address;
7620 max_phys = sas_expander->num_phys;
7621 port = sas_expander->port;
7622 } else if (parent_handle < ioc->sas_hba.num_phys) {
7623 sas_address = ioc->sas_hba.sas_address;
7624 max_phys = ioc->sas_hba.num_phys;
7626 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7629 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7631 /* handle siblings events */
7632 for (i = 0; i < event_data->NumEntries; i++) {
7633 if (fw_event->ignore) {
7635 ioc_info(ioc, "ignoring expander event\n"));
7638 if (ioc->remove_host || ioc->pci_error_recovery)
7640 phy_number = event_data->StartPhyNum + i;
7641 if (phy_number >= max_phys)
7643 reason_code = event_data->PHY[i].PhyStatus &
7644 MPI2_EVENT_SAS_TOPO_RC_MASK;
7645 if ((event_data->PHY[i].PhyStatus &
7646 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
7647 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
7649 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7652 link_rate = event_data->PHY[i].LinkRate >> 4;
7653 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7654 switch (reason_code) {
7655 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7657 if (ioc->shost_recovery)
7660 if (link_rate == prev_link_rate)
7663 mpt3sas_transport_update_links(ioc, sas_address,
7664 handle, phy_number, link_rate, port);
7666 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
7669 _scsih_check_device(ioc, sas_address, handle,
7670 phy_number, link_rate);
7672 if (!test_bit(handle, ioc->pend_os_device_add))
7677 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7679 if (ioc->shost_recovery)
7682 mpt3sas_transport_update_links(ioc, sas_address,
7683 handle, phy_number, link_rate, port);
7685 _scsih_add_device(ioc, handle, phy_number, 0);
7688 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7690 _scsih_device_remove_by_handle(ioc, handle);
7695 /* handle expander removal */
7696 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
7698 mpt3sas_expander_remove(ioc, sas_address, port);
7704 * _scsih_sas_device_status_change_event_debug - debug for device event
7706 * @event_data: event data payload
7710 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7711 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7713 char *reason_str = NULL;
7715 switch (event_data->ReasonCode) {
7716 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7717 reason_str = "smart data";
7719 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7720 reason_str = "unsupported device discovered";
7722 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7723 reason_str = "internal device reset";
7725 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7726 reason_str = "internal task abort";
7728 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7729 reason_str = "internal task abort set";
7731 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7732 reason_str = "internal clear task set";
7734 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7735 reason_str = "internal query task";
7737 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7738 reason_str = "sata init failure";
7740 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7741 reason_str = "internal device reset complete";
7743 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7744 reason_str = "internal task abort complete";
7746 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7747 reason_str = "internal async notification";
7749 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7750 reason_str = "expander reduced functionality";
7752 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7753 reason_str = "expander reduced functionality complete";
7756 reason_str = "unknown reason";
7759 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7760 reason_str, le16_to_cpu(event_data->DevHandle),
7761 (u64)le64_to_cpu(event_data->SASAddress),
7762 le16_to_cpu(event_data->TaskTag));
7763 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7764 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7765 event_data->ASC, event_data->ASCQ);
7770 * _scsih_sas_device_status_change_event - handle device status change
7771 * @ioc: per adapter object
7772 * @event_data: The fw event
7776 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7777 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7779 struct MPT3SAS_TARGET *target_priv_data;
7780 struct _sas_device *sas_device;
7782 unsigned long flags;
7784 /* In MPI Revision K (0xC), the internal device reset complete was
7785 * implemented, so avoid setting tm_busy flag for older firmware.
7787 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
7790 if (event_data->ReasonCode !=
7791 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7792 event_data->ReasonCode !=
7793 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7796 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7797 sas_address = le64_to_cpu(event_data->SASAddress);
7798 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7800 mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
7802 if (!sas_device || !sas_device->starget)
7805 target_priv_data = sas_device->starget->hostdata;
7806 if (!target_priv_data)
7809 if (event_data->ReasonCode ==
7810 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
7811 target_priv_data->tm_busy = 1;
7813 target_priv_data->tm_busy = 0;
7815 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7817 "%s tm_busy flag for handle(0x%04x)\n",
7818 (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
7819 target_priv_data->handle);
7823 sas_device_put(sas_device);
7825 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7830 * _scsih_check_pcie_access_status - check access flags
7831 * @ioc: per adapter object
7833 * @handle: sas device handle
7834 * @access_status: errors returned during discovery of the device
7836 * Return: 0 for success, else failure
7839 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7840 u16 handle, u8 access_status)
7845 switch (access_status) {
7846 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7847 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7850 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7851 desc = "PCIe device capability failed";
7853 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7854 desc = "PCIe device blocked";
7856 "Device with Access Status (%s): wwid(0x%016llx), "
7857 "handle(0x%04x)\n ll only be added to the internal list",
7858 desc, (u64)wwid, handle);
7861 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7862 desc = "PCIe device mem space access failed";
7864 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7865 desc = "PCIe device unsupported";
7867 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7868 desc = "PCIe device MSIx Required";
7870 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7871 desc = "PCIe device init fail max";
7873 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7874 desc = "PCIe device status unknown";
7876 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7877 desc = "nvme ready timeout";
7879 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7880 desc = "nvme device configuration unsupported";
7882 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7883 desc = "nvme identify failed";
7885 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7886 desc = "nvme qconfig failed";
7888 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7889 desc = "nvme qcreation failed";
7891 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7892 desc = "nvme eventcfg failed";
7894 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7895 desc = "nvme get feature stat failed";
7897 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7898 desc = "nvme idle timeout";
7900 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7901 desc = "nvme failure status";
7904 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7905 access_status, (u64)wwid, handle);
7912 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7913 desc, (u64)wwid, handle);
7918 * _scsih_pcie_device_remove_from_sml - removing pcie device
7919 * from SML and free up associated memory
7920 * @ioc: per adapter object
7921 * @pcie_device: the pcie_device object
7924 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
7925 struct _pcie_device *pcie_device)
7927 struct MPT3SAS_TARGET *sas_target_priv_data;
7930 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
7932 pcie_device->handle, (u64)pcie_device->wwid));
7933 if (pcie_device->enclosure_handle != 0)
7935 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
7937 (u64)pcie_device->enclosure_logical_id,
7938 pcie_device->slot));
7939 if (pcie_device->connector_name[0] != '\0')
7941 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
7943 pcie_device->enclosure_level,
7944 pcie_device->connector_name));
7946 if (pcie_device->starget && pcie_device->starget->hostdata) {
7947 sas_target_priv_data = pcie_device->starget->hostdata;
7948 sas_target_priv_data->deleted = 1;
7949 _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
7950 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
7953 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7954 pcie_device->handle, (u64)pcie_device->wwid);
7955 if (pcie_device->enclosure_handle != 0)
7956 ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
7957 (u64)pcie_device->enclosure_logical_id,
7959 if (pcie_device->connector_name[0] != '\0')
7960 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
7961 pcie_device->enclosure_level,
7962 pcie_device->connector_name);
7964 if (pcie_device->starget && (pcie_device->access_status !=
7965 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
7966 scsi_remove_target(&pcie_device->starget->dev);
7968 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
7970 pcie_device->handle, (u64)pcie_device->wwid));
7971 if (pcie_device->enclosure_handle != 0)
7973 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
7975 (u64)pcie_device->enclosure_logical_id,
7976 pcie_device->slot));
7977 if (pcie_device->connector_name[0] != '\0')
7979 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
7981 pcie_device->enclosure_level,
7982 pcie_device->connector_name));
7984 kfree(pcie_device->serial_number);
7989 * _scsih_pcie_check_device - checking device responsiveness
7990 * @ioc: per adapter object
7991 * @handle: attached device handle
7994 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7996 Mpi2ConfigReply_t mpi_reply;
7997 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7999 struct _pcie_device *pcie_device;
8001 unsigned long flags;
8002 struct scsi_target *starget;
8003 struct MPT3SAS_TARGET *sas_target_priv_data;
8006 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8007 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
8010 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
8011 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8014 /* check if this is end device */
8015 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8016 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8019 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8020 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8021 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8024 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8028 if (unlikely(pcie_device->handle != handle)) {
8029 starget = pcie_device->starget;
8030 sas_target_priv_data = starget->hostdata;
8031 pcie_device->access_status = pcie_device_pg0.AccessStatus;
8032 starget_printk(KERN_INFO, starget,
8033 "handle changed from(0x%04x) to (0x%04x)!!!\n",
8034 pcie_device->handle, handle);
8035 sas_target_priv_data->handle = handle;
8036 pcie_device->handle = handle;
8038 if (le32_to_cpu(pcie_device_pg0.Flags) &
8039 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8040 pcie_device->enclosure_level =
8041 pcie_device_pg0.EnclosureLevel;
8042 memcpy(&pcie_device->connector_name[0],
8043 &pcie_device_pg0.ConnectorName[0], 4);
8045 pcie_device->enclosure_level = 0;
8046 pcie_device->connector_name[0] = '\0';
8050 /* check if device is present */
8051 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8052 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8053 ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
8055 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8056 pcie_device_put(pcie_device);
8060 /* check if there were any issues with discovery */
8061 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8062 pcie_device_pg0.AccessStatus)) {
8063 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8064 pcie_device_put(pcie_device);
8068 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8069 pcie_device_put(pcie_device);
8071 _scsih_ublock_io_device(ioc, wwid, NULL);
8077 * _scsih_pcie_add_device - creating pcie device object
8078 * @ioc: per adapter object
8079 * @handle: pcie device handle
8081 * Creating end device object, stored in ioc->pcie_device_list.
8083 * Return: 1 means queue the event later, 0 means complete the event
8086 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8088 Mpi26PCIeDevicePage0_t pcie_device_pg0;
8089 Mpi26PCIeDevicePage2_t pcie_device_pg2;
8090 Mpi2ConfigReply_t mpi_reply;
8091 struct _pcie_device *pcie_device;
8092 struct _enclosure_node *enclosure_dev;
8096 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8097 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
8098 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8099 __FILE__, __LINE__, __func__);
8102 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8103 MPI2_IOCSTATUS_MASK;
8104 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8105 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8106 __FILE__, __LINE__, __func__);
8110 set_bit(handle, ioc->pend_os_device_add);
8111 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8113 /* check if device is present */
8114 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8115 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8116 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
8121 /* check if there were any issues with discovery */
8122 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8123 pcie_device_pg0.AccessStatus))
8126 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8127 (pcie_device_pg0.DeviceInfo))))
8130 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8132 clear_bit(handle, ioc->pend_os_device_add);
8133 pcie_device_put(pcie_device);
8137 /* PCIe Device Page 2 contains read-only information about a
8138 * specific NVMe device; therefore, this page is only
8139 * valid for NVMe devices and skip for pcie devices of type scsi.
8141 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8142 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8143 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8144 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8147 "failure at %s:%d/%s()!\n", __FILE__,
8148 __LINE__, __func__);
8152 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8153 MPI2_IOCSTATUS_MASK;
8154 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8156 "failure at %s:%d/%s()!\n", __FILE__,
8157 __LINE__, __func__);
8162 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8164 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8165 __FILE__, __LINE__, __func__);
8169 kref_init(&pcie_device->refcount);
8170 pcie_device->id = ioc->pcie_target_id++;
8171 pcie_device->channel = PCIE_CHANNEL;
8172 pcie_device->handle = handle;
8173 pcie_device->access_status = pcie_device_pg0.AccessStatus;
8174 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8175 pcie_device->wwid = wwid;
8176 pcie_device->port_num = pcie_device_pg0.PortNum;
8177 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8178 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8180 pcie_device->enclosure_handle =
8181 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8182 if (pcie_device->enclosure_handle != 0)
8183 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8185 if (le32_to_cpu(pcie_device_pg0.Flags) &
8186 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8187 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8188 memcpy(&pcie_device->connector_name[0],
8189 &pcie_device_pg0.ConnectorName[0], 4);
8191 pcie_device->enclosure_level = 0;
8192 pcie_device->connector_name[0] = '\0';
8195 /* get enclosure_logical_id */
8196 if (pcie_device->enclosure_handle) {
8198 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8199 pcie_device->enclosure_handle);
8201 pcie_device->enclosure_logical_id =
8202 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8204 /* TODO -- Add device name once FW supports it */
8205 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8206 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8207 pcie_device->nvme_mdts =
8208 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8209 pcie_device->shutdown_latency =
8210 le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8212 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
8213 * if drive's RTD3 Entry Latency is greater then IOC's
8214 * max_shutdown_latency.
8216 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8217 ioc->max_shutdown_latency =
8218 pcie_device->shutdown_latency;
8219 if (pcie_device_pg2.ControllerResetTO)
8220 pcie_device->reset_timeout =
8221 pcie_device_pg2.ControllerResetTO;
8223 pcie_device->reset_timeout = 30;
8225 pcie_device->reset_timeout = 30;
8227 if (ioc->wait_for_discovery_to_complete)
8228 _scsih_pcie_device_init_add(ioc, pcie_device);
8230 _scsih_pcie_device_add(ioc, pcie_device);
8232 pcie_device_put(pcie_device);
8237 * _scsih_pcie_topology_change_event_debug - debug for topology
8239 * @ioc: per adapter object
8240 * @event_data: event data payload
8244 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8245 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8251 char *status_str = NULL;
8252 u8 link_rate, prev_link_rate;
8254 switch (event_data->SwitchStatus) {
8255 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8258 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8259 status_str = "remove";
8261 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8263 status_str = "responding";
8265 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8266 status_str = "remove delay";
8269 status_str = "unknown status";
8272 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8273 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
8274 "start_port(%02d), count(%d)\n",
8275 le16_to_cpu(event_data->SwitchDevHandle),
8276 le16_to_cpu(event_data->EnclosureHandle),
8277 event_data->StartPortNum, event_data->NumEntries);
8278 for (i = 0; i < event_data->NumEntries; i++) {
8280 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8283 port_number = event_data->StartPortNum + i;
8284 reason_code = event_data->PortEntry[i].PortStatus;
8285 switch (reason_code) {
8286 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8287 status_str = "target add";
8289 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8290 status_str = "target remove";
8292 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8293 status_str = "delay target remove";
8295 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8296 status_str = "link rate change";
8298 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8299 status_str = "target responding";
8302 status_str = "unknown";
8305 link_rate = event_data->PortEntry[i].CurrentPortInfo &
8306 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8307 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8308 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8309 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8310 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
8311 handle, status_str, link_rate, prev_link_rate);
8316 * _scsih_pcie_topology_change_event - handle PCIe topology
8318 * @ioc: per adapter object
8319 * @fw_event: The fw_event_work object
8324 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
8325 struct fw_event_work *fw_event)
8330 u8 link_rate, prev_link_rate;
8331 unsigned long flags;
8333 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
8334 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
8335 struct _pcie_device *pcie_device;
8337 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8338 _scsih_pcie_topology_change_event_debug(ioc, event_data);
8340 if (ioc->shost_recovery || ioc->remove_host ||
8341 ioc->pci_error_recovery)
8344 if (fw_event->ignore) {
8345 dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
8349 /* handle siblings events */
8350 for (i = 0; i < event_data->NumEntries; i++) {
8351 if (fw_event->ignore) {
8353 ioc_info(ioc, "ignoring switch event\n"));
8356 if (ioc->remove_host || ioc->pci_error_recovery)
8358 reason_code = event_data->PortEntry[i].PortStatus;
8360 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8364 link_rate = event_data->PortEntry[i].CurrentPortInfo
8365 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8366 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
8367 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8369 switch (reason_code) {
8370 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8371 if (ioc->shost_recovery)
8373 if (link_rate == prev_link_rate)
8375 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8378 _scsih_pcie_check_device(ioc, handle);
8380 /* This code after this point handles the test case
8381 * where a device has been added, however its returning
8382 * BUSY for sometime. Then before the Device Missing
8383 * Delay expires and the device becomes READY, the
8384 * device is removed and added back.
8386 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8387 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
8388 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8391 pcie_device_put(pcie_device);
8395 if (!test_bit(handle, ioc->pend_os_device_add))
8399 ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
8401 event_data->PortEntry[i].PortStatus &= 0xF0;
8402 event_data->PortEntry[i].PortStatus |=
8403 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
8405 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8406 if (ioc->shost_recovery)
8408 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8411 rc = _scsih_pcie_add_device(ioc, handle);
8413 /* mark entry vacant */
8414 /* TODO This needs to be reviewed and fixed,
8415 * we dont have an entry
8416 * to make an event void like vacant
8418 event_data->PortEntry[i].PortStatus |=
8419 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
8422 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8423 _scsih_pcie_device_remove_by_handle(ioc, handle);
8430 * _scsih_pcie_device_status_change_event_debug - debug for device event
8432 * @event_data: event data payload
8436 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8437 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8439 char *reason_str = NULL;
8441 switch (event_data->ReasonCode) {
8442 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8443 reason_str = "smart data";
8445 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8446 reason_str = "unsupported device discovered";
8448 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8449 reason_str = "internal device reset";
8451 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8452 reason_str = "internal task abort";
8454 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8455 reason_str = "internal task abort set";
8457 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8458 reason_str = "internal clear task set";
8460 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8461 reason_str = "internal query task";
8463 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8464 reason_str = "device init failure";
8466 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8467 reason_str = "internal device reset complete";
8469 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8470 reason_str = "internal task abort complete";
8472 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8473 reason_str = "internal async notification";
8475 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8476 reason_str = "pcie hot reset failed";
8479 reason_str = "unknown reason";
8483 ioc_info(ioc, "PCIE device status change: (%s)\n"
8484 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8485 reason_str, le16_to_cpu(event_data->DevHandle),
8486 (u64)le64_to_cpu(event_data->WWID),
8487 le16_to_cpu(event_data->TaskTag));
8488 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8489 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8490 event_data->ASC, event_data->ASCQ);
8495 * _scsih_pcie_device_status_change_event - handle device status
8497 * @ioc: per adapter object
8498 * @fw_event: The fw_event_work object
8502 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8503 struct fw_event_work *fw_event)
8505 struct MPT3SAS_TARGET *target_priv_data;
8506 struct _pcie_device *pcie_device;
8508 unsigned long flags;
8509 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
8510 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
8511 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8512 _scsih_pcie_device_status_change_event_debug(ioc,
8515 if (event_data->ReasonCode !=
8516 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
8517 event_data->ReasonCode !=
8518 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
8521 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8522 wwid = le64_to_cpu(event_data->WWID);
8523 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8525 if (!pcie_device || !pcie_device->starget)
8528 target_priv_data = pcie_device->starget->hostdata;
8529 if (!target_priv_data)
8532 if (event_data->ReasonCode ==
8533 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
8534 target_priv_data->tm_busy = 1;
8536 target_priv_data->tm_busy = 0;
8539 pcie_device_put(pcie_device);
8541 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8545 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8547 * @ioc: per adapter object
8548 * @event_data: event data payload
8552 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8553 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8555 char *reason_str = NULL;
8557 switch (event_data->ReasonCode) {
8558 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8559 reason_str = "enclosure add";
8561 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8562 reason_str = "enclosure remove";
8565 reason_str = "unknown reason";
8569 ioc_info(ioc, "enclosure status change: (%s)\n"
8570 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8572 le16_to_cpu(event_data->EnclosureHandle),
8573 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8574 le16_to_cpu(event_data->StartSlot));
8578 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8579 * @ioc: per adapter object
8580 * @fw_event: The fw_event_work object
8584 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8585 struct fw_event_work *fw_event)
8587 Mpi2ConfigReply_t mpi_reply;
8588 struct _enclosure_node *enclosure_dev = NULL;
8589 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8590 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8592 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8594 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8595 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
8596 (Mpi2EventDataSasEnclDevStatusChange_t *)
8597 fw_event->event_data);
8598 if (ioc->shost_recovery)
8601 if (enclosure_handle)
8603 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8605 switch (event_data->ReasonCode) {
8606 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8607 if (!enclosure_dev) {
8609 kzalloc(sizeof(struct _enclosure_node),
8611 if (!enclosure_dev) {
8612 ioc_info(ioc, "failure at %s:%d/%s()!\n",
8613 __FILE__, __LINE__, __func__);
8616 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8617 &enclosure_dev->pg0,
8618 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
8621 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8622 MPI2_IOCSTATUS_MASK)) {
8623 kfree(enclosure_dev);
8627 list_add_tail(&enclosure_dev->list,
8628 &ioc->enclosure_list);
8631 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8632 if (enclosure_dev) {
8633 list_del(&enclosure_dev->list);
8634 kfree(enclosure_dev);
8643 * _scsih_sas_broadcast_primitive_event - handle broadcast events
8644 * @ioc: per adapter object
8645 * @fw_event: The fw_event_work object
8649 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
8650 struct fw_event_work *fw_event)
8652 struct scsi_cmnd *scmd;
8653 struct scsi_device *sdev;
8654 struct scsiio_tracker *st;
8657 struct MPT3SAS_DEVICE *sas_device_priv_data;
8658 u32 termination_count;
8660 Mpi2SCSITaskManagementReply_t *mpi_reply;
8661 Mpi2EventDataSasBroadcastPrimitive_t *event_data =
8662 (Mpi2EventDataSasBroadcastPrimitive_t *)
8663 fw_event->event_data;
8665 unsigned long flags;
8668 u8 task_abort_retries;
8670 mutex_lock(&ioc->tm_cmds.mutex);
8671 ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
8672 __func__, event_data->PhyNum, event_data->PortWidth);
8674 _scsih_block_io_all_device(ioc);
8676 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8677 mpi_reply = ioc->tm_cmds.reply;
8678 broadcast_aen_retry:
8680 /* sanity checks for retrying this loop */
8681 if (max_retries++ == 5) {
8682 dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
8684 } else if (max_retries > 1)
8686 ioc_info(ioc, "%s: %d retry\n",
8687 __func__, max_retries - 1));
8689 termination_count = 0;
8691 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
8692 if (ioc->shost_recovery)
8694 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
8697 st = scsi_cmd_priv(scmd);
8698 sdev = scmd->device;
8699 sas_device_priv_data = sdev->hostdata;
8700 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
8702 /* skip hidden raid components */
8703 if (sas_device_priv_data->sas_target->flags &
8704 MPT_TARGET_FLAGS_RAID_COMPONENT)
8707 if (sas_device_priv_data->sas_target->flags &
8708 MPT_TARGET_FLAGS_VOLUME)
8710 /* skip PCIe devices */
8711 if (sas_device_priv_data->sas_target->flags &
8712 MPT_TARGET_FLAGS_PCIE_DEVICE)
8715 handle = sas_device_priv_data->sas_target->handle;
8716 lun = sas_device_priv_data->lun;
8719 if (ioc->shost_recovery)
8722 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8723 r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
8724 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
8725 st->msix_io, 30, 0);
8727 sdev_printk(KERN_WARNING, sdev,
8728 "mpt3sas_scsih_issue_tm: FAILED when sending "
8729 "QUERY_TASK: scmd(%p)\n", scmd);
8730 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8731 goto broadcast_aen_retry;
8733 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
8734 & MPI2_IOCSTATUS_MASK;
8735 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8736 sdev_printk(KERN_WARNING, sdev,
8737 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
8739 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8740 goto broadcast_aen_retry;
8743 /* see if IO is still owned by IOC and target */
8744 if (mpi_reply->ResponseCode ==
8745 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
8746 mpi_reply->ResponseCode ==
8747 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
8748 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8751 task_abort_retries = 0;
8753 if (task_abort_retries++ == 60) {
8755 ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
8757 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8758 goto broadcast_aen_retry;
8761 if (ioc->shost_recovery)
8764 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
8765 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
8766 st->smid, st->msix_io, 30, 0);
8767 if (r == FAILED || st->cb_idx != 0xFF) {
8768 sdev_printk(KERN_WARNING, sdev,
8769 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
8770 "scmd(%p)\n", scmd);
8774 if (task_abort_retries > 1)
8775 sdev_printk(KERN_WARNING, sdev,
8776 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
8778 task_abort_retries - 1, scmd);
8780 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
8781 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8784 if (ioc->broadcast_aen_pending) {
8787 "%s: loop back due to pending AEN\n",
8789 ioc->broadcast_aen_pending = 0;
8790 goto broadcast_aen_retry;
8794 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8798 ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
8799 __func__, query_count, termination_count));
8801 ioc->broadcast_aen_busy = 0;
8802 if (!ioc->shost_recovery)
8803 _scsih_ublock_io_all_device(ioc);
8804 mutex_unlock(&ioc->tm_cmds.mutex);
8808 * _scsih_sas_discovery_event - handle discovery events
8809 * @ioc: per adapter object
8810 * @fw_event: The fw_event_work object
8814 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8815 struct fw_event_work *fw_event)
8817 Mpi2EventDataSasDiscovery_t *event_data =
8818 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8820 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8821 ioc_info(ioc, "discovery event: (%s)",
8822 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8824 if (event_data->DiscoveryStatus)
8825 pr_cont("discovery_status(0x%08x)",
8826 le32_to_cpu(event_data->DiscoveryStatus));
8830 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8831 !ioc->sas_hba.num_phys) {
8832 if (disable_discovery > 0 && ioc->shost_recovery) {
8833 /* Wait for the reset to complete */
8834 while (ioc->shost_recovery)
8837 _scsih_sas_host_add(ioc);
8842 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8844 * @ioc: per adapter object
8845 * @fw_event: The fw_event_work object
8849 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8850 struct fw_event_work *fw_event)
8852 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8853 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8855 switch (event_data->ReasonCode) {
8856 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8857 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8858 le16_to_cpu(event_data->DevHandle),
8859 (u64)le64_to_cpu(event_data->SASAddress),
8860 event_data->PhysicalPort);
8862 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8863 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8864 le16_to_cpu(event_data->DevHandle),
8865 (u64)le64_to_cpu(event_data->SASAddress),
8866 event_data->PhysicalPort);
8874 * _scsih_pcie_enumeration_event - handle enumeration events
8875 * @ioc: per adapter object
8876 * @fw_event: The fw_event_work object
8880 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8881 struct fw_event_work *fw_event)
8883 Mpi26EventDataPCIeEnumeration_t *event_data =
8884 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8886 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8889 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8890 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8891 "started" : "completed",
8893 if (event_data->EnumerationStatus)
8894 pr_cont("enumeration_status(0x%08x)",
8895 le32_to_cpu(event_data->EnumerationStatus));
8900 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
8901 * @ioc: per adapter object
8902 * @handle: device handle for physical disk
8903 * @phys_disk_num: physical disk number
8905 * Return: 0 for success, else failure.
8908 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
8910 Mpi2RaidActionRequest_t *mpi_request;
8911 Mpi2RaidActionReply_t *mpi_reply;
8918 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
8921 mutex_lock(&ioc->scsih_cmds.mutex);
8923 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
8924 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
8928 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
8930 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
8932 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
8933 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8938 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
8939 ioc->scsih_cmds.smid = smid;
8940 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
8942 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
8943 mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
8944 mpi_request->PhysDiskNum = phys_disk_num;
8947 ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
8948 handle, phys_disk_num));
8950 init_completion(&ioc->scsih_cmds.done);
8951 ioc->put_smid_default(ioc, smid);
8952 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
8954 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
8955 mpt3sas_check_cmd_timeout(ioc,
8956 ioc->scsih_cmds.status, mpi_request,
8957 sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
8962 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
8964 mpi_reply = ioc->scsih_cmds.reply;
8965 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
8966 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
8967 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
8970 ioc_status &= MPI2_IOCSTATUS_MASK;
8971 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8973 ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
8974 ioc_status, log_info));
8978 ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
8982 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8983 mutex_unlock(&ioc->scsih_cmds.mutex);
8986 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
8991 * _scsih_reprobe_lun - reprobing lun
8992 * @sdev: scsi device struct
8993 * @no_uld_attach: sdev->no_uld_attach flag setting
8997 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8999 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
9000 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
9001 sdev->no_uld_attach ? "hiding" : "exposing");
9002 WARN_ON(scsi_device_reprobe(sdev));
9006 * _scsih_sas_volume_add - add new volume
9007 * @ioc: per adapter object
9008 * @element: IR config element data
9012 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
9013 Mpi2EventIrConfigElement_t *element)
9015 struct _raid_device *raid_device;
9016 unsigned long flags;
9018 u16 handle = le16_to_cpu(element->VolDevHandle);
9021 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9023 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9024 __FILE__, __LINE__, __func__);
9028 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9029 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
9030 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9035 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9037 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9038 __FILE__, __LINE__, __func__);
9042 raid_device->id = ioc->sas_id++;
9043 raid_device->channel = RAID_CHANNEL;
9044 raid_device->handle = handle;
9045 raid_device->wwid = wwid;
9046 _scsih_raid_device_add(ioc, raid_device);
9047 if (!ioc->wait_for_discovery_to_complete) {
9048 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9049 raid_device->id, 0);
9051 _scsih_raid_device_remove(ioc, raid_device);
9053 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9054 _scsih_determine_boot_device(ioc, raid_device, 1);
9055 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9060 * _scsih_sas_volume_delete - delete volume
9061 * @ioc: per adapter object
9062 * @handle: volume device handle
9066 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
9068 struct _raid_device *raid_device;
9069 unsigned long flags;
9070 struct MPT3SAS_TARGET *sas_target_priv_data;
9071 struct scsi_target *starget = NULL;
9073 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9074 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9076 if (raid_device->starget) {
9077 starget = raid_device->starget;
9078 sas_target_priv_data = starget->hostdata;
9079 sas_target_priv_data->deleted = 1;
9081 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9082 raid_device->handle, (u64)raid_device->wwid);
9083 list_del(&raid_device->list);
9086 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9088 scsi_remove_target(&starget->dev);
9092 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
9093 * @ioc: per adapter object
9094 * @element: IR config element data
9098 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
9099 Mpi2EventIrConfigElement_t *element)
9101 struct _sas_device *sas_device;
9102 struct scsi_target *starget = NULL;
9103 struct MPT3SAS_TARGET *sas_target_priv_data;
9104 unsigned long flags;
9105 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9107 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9108 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9110 sas_device->volume_handle = 0;
9111 sas_device->volume_wwid = 0;
9112 clear_bit(handle, ioc->pd_handles);
9113 if (sas_device->starget && sas_device->starget->hostdata) {
9114 starget = sas_device->starget;
9115 sas_target_priv_data = starget->hostdata;
9116 sas_target_priv_data->flags &=
9117 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
9120 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9124 /* exposing raid component */
9126 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
9128 sas_device_put(sas_device);
9132 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
9133 * @ioc: per adapter object
9134 * @element: IR config element data
9138 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
9139 Mpi2EventIrConfigElement_t *element)
9141 struct _sas_device *sas_device;
9142 struct scsi_target *starget = NULL;
9143 struct MPT3SAS_TARGET *sas_target_priv_data;
9144 unsigned long flags;
9145 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9146 u16 volume_handle = 0;
9147 u64 volume_wwid = 0;
9149 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
9151 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
9154 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9155 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9157 set_bit(handle, ioc->pd_handles);
9158 if (sas_device->starget && sas_device->starget->hostdata) {
9159 starget = sas_device->starget;
9160 sas_target_priv_data = starget->hostdata;
9161 sas_target_priv_data->flags |=
9162 MPT_TARGET_FLAGS_RAID_COMPONENT;
9163 sas_device->volume_handle = volume_handle;
9164 sas_device->volume_wwid = volume_wwid;
9167 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9171 /* hiding raid component */
9172 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9175 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
9177 sas_device_put(sas_device);
9181 * _scsih_sas_pd_delete - delete pd component
9182 * @ioc: per adapter object
9183 * @element: IR config element data
9187 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9188 Mpi2EventIrConfigElement_t *element)
9190 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9192 _scsih_device_remove_by_handle(ioc, handle);
9196 * _scsih_sas_pd_add - remove pd component
9197 * @ioc: per adapter object
9198 * @element: IR config element data
9202 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
9203 Mpi2EventIrConfigElement_t *element)
9205 struct _sas_device *sas_device;
9206 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9207 Mpi2ConfigReply_t mpi_reply;
9208 Mpi2SasDevicePage0_t sas_device_pg0;
9213 set_bit(handle, ioc->pd_handles);
9215 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9217 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9218 sas_device_put(sas_device);
9222 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
9223 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
9224 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9225 __FILE__, __LINE__, __func__);
9229 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9230 MPI2_IOCSTATUS_MASK;
9231 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9232 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9233 __FILE__, __LINE__, __func__);
9237 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9238 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9239 mpt3sas_transport_update_links(ioc, sas_address, handle,
9240 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9241 mpt3sas_get_port_by_id(ioc,
9242 sas_device_pg0.PhysicalPort, 0));
9244 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9245 _scsih_add_device(ioc, handle, 0, 1);
9249 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9250 * @ioc: per adapter object
9251 * @event_data: event data payload
9255 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9256 Mpi2EventDataIrConfigChangeList_t *event_data)
9258 Mpi2EventIrConfigElement_t *element;
9261 char *reason_str = NULL, *element_str = NULL;
9263 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9265 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9266 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9267 "foreign" : "native",
9268 event_data->NumElements);
9269 for (i = 0; i < event_data->NumElements; i++, element++) {
9270 switch (element->ReasonCode) {
9271 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9274 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9275 reason_str = "remove";
9277 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9278 reason_str = "no change";
9280 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9281 reason_str = "hide";
9283 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9284 reason_str = "unhide";
9286 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9287 reason_str = "volume_created";
9289 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9290 reason_str = "volume_deleted";
9292 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9293 reason_str = "pd_created";
9295 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9296 reason_str = "pd_deleted";
9299 reason_str = "unknown reason";
9302 element_type = le16_to_cpu(element->ElementFlags) &
9303 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9304 switch (element_type) {
9305 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9306 element_str = "volume";
9308 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9309 element_str = "phys disk";
9311 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9312 element_str = "hot spare";
9315 element_str = "unknown element";
9318 pr_info("\t(%s:%s), vol handle(0x%04x), " \
9319 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9320 reason_str, le16_to_cpu(element->VolDevHandle),
9321 le16_to_cpu(element->PhysDiskDevHandle),
9322 element->PhysDiskNum);
9327 * _scsih_sas_ir_config_change_event - handle ir configuration change events
9328 * @ioc: per adapter object
9329 * @fw_event: The fw_event_work object
9333 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
9334 struct fw_event_work *fw_event)
9336 Mpi2EventIrConfigElement_t *element;
9339 Mpi2EventDataIrConfigChangeList_t *event_data =
9340 (Mpi2EventDataIrConfigChangeList_t *)
9341 fw_event->event_data;
9343 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9344 (!ioc->hide_ir_msg))
9345 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
9347 foreign_config = (le32_to_cpu(event_data->Flags) &
9348 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
9350 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9351 if (ioc->shost_recovery &&
9352 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
9353 for (i = 0; i < event_data->NumElements; i++, element++) {
9354 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
9355 _scsih_ir_fastpath(ioc,
9356 le16_to_cpu(element->PhysDiskDevHandle),
9357 element->PhysDiskNum);
9362 for (i = 0; i < event_data->NumElements; i++, element++) {
9364 switch (element->ReasonCode) {
9365 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9366 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9367 if (!foreign_config)
9368 _scsih_sas_volume_add(ioc, element);
9370 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9371 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9372 if (!foreign_config)
9373 _scsih_sas_volume_delete(ioc,
9374 le16_to_cpu(element->VolDevHandle));
9376 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9377 if (!ioc->is_warpdrive)
9378 _scsih_sas_pd_hide(ioc, element);
9380 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9381 if (!ioc->is_warpdrive)
9382 _scsih_sas_pd_expose(ioc, element);
9384 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9385 if (!ioc->is_warpdrive)
9386 _scsih_sas_pd_add(ioc, element);
9388 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9389 if (!ioc->is_warpdrive)
9390 _scsih_sas_pd_delete(ioc, element);
9397 * _scsih_sas_ir_volume_event - IR volume event
9398 * @ioc: per adapter object
9399 * @fw_event: The fw_event_work object
9403 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
9404 struct fw_event_work *fw_event)
9407 unsigned long flags;
9408 struct _raid_device *raid_device;
9412 Mpi2EventDataIrVolume_t *event_data =
9413 (Mpi2EventDataIrVolume_t *) fw_event->event_data;
9415 if (ioc->shost_recovery)
9418 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
9421 handle = le16_to_cpu(event_data->VolDevHandle);
9422 state = le32_to_cpu(event_data->NewValue);
9423 if (!ioc->hide_ir_msg)
9425 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9427 le32_to_cpu(event_data->PreviousValue),
9430 case MPI2_RAID_VOL_STATE_MISSING:
9431 case MPI2_RAID_VOL_STATE_FAILED:
9432 _scsih_sas_volume_delete(ioc, handle);
9435 case MPI2_RAID_VOL_STATE_ONLINE:
9436 case MPI2_RAID_VOL_STATE_DEGRADED:
9437 case MPI2_RAID_VOL_STATE_OPTIMAL:
9439 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9440 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9441 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9446 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9448 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9449 __FILE__, __LINE__, __func__);
9453 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9455 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9456 __FILE__, __LINE__, __func__);
9460 raid_device->id = ioc->sas_id++;
9461 raid_device->channel = RAID_CHANNEL;
9462 raid_device->handle = handle;
9463 raid_device->wwid = wwid;
9464 _scsih_raid_device_add(ioc, raid_device);
9465 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9466 raid_device->id, 0);
9468 _scsih_raid_device_remove(ioc, raid_device);
9471 case MPI2_RAID_VOL_STATE_INITIALIZING:
9478 * _scsih_sas_ir_physical_disk_event - PD event
9479 * @ioc: per adapter object
9480 * @fw_event: The fw_event_work object
9484 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
9485 struct fw_event_work *fw_event)
9487 u16 handle, parent_handle;
9489 struct _sas_device *sas_device;
9490 Mpi2ConfigReply_t mpi_reply;
9491 Mpi2SasDevicePage0_t sas_device_pg0;
9493 Mpi2EventDataIrPhysicalDisk_t *event_data =
9494 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
9497 if (ioc->shost_recovery)
9500 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
9503 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
9504 state = le32_to_cpu(event_data->NewValue);
9506 if (!ioc->hide_ir_msg)
9508 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9510 le32_to_cpu(event_data->PreviousValue),
9514 case MPI2_RAID_PD_STATE_ONLINE:
9515 case MPI2_RAID_PD_STATE_DEGRADED:
9516 case MPI2_RAID_PD_STATE_REBUILDING:
9517 case MPI2_RAID_PD_STATE_OPTIMAL:
9518 case MPI2_RAID_PD_STATE_HOT_SPARE:
9520 if (!ioc->is_warpdrive)
9521 set_bit(handle, ioc->pd_handles);
9523 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9525 sas_device_put(sas_device);
9529 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9530 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9532 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9533 __FILE__, __LINE__, __func__);
9537 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9538 MPI2_IOCSTATUS_MASK;
9539 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9540 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9541 __FILE__, __LINE__, __func__);
9545 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9546 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9547 mpt3sas_transport_update_links(ioc, sas_address, handle,
9548 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9549 mpt3sas_get_port_by_id(ioc,
9550 sas_device_pg0.PhysicalPort, 0));
9552 _scsih_add_device(ioc, handle, 0, 1);
9556 case MPI2_RAID_PD_STATE_OFFLINE:
9557 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
9558 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
9565 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9566 * @ioc: per adapter object
9567 * @event_data: event data payload
9571 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9572 Mpi2EventDataIrOperationStatus_t *event_data)
9574 char *reason_str = NULL;
9576 switch (event_data->RAIDOperation) {
9577 case MPI2_EVENT_IR_RAIDOP_RESYNC:
9578 reason_str = "resync";
9580 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9581 reason_str = "online capacity expansion";
9583 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9584 reason_str = "consistency check";
9586 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9587 reason_str = "background init";
9589 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9590 reason_str = "make data consistent";
9597 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9599 le16_to_cpu(event_data->VolDevHandle),
9600 event_data->PercentComplete);
9604 * _scsih_sas_ir_operation_status_event - handle RAID operation events
9605 * @ioc: per adapter object
9606 * @fw_event: The fw_event_work object
9610 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9611 struct fw_event_work *fw_event)
9613 Mpi2EventDataIrOperationStatus_t *event_data =
9614 (Mpi2EventDataIrOperationStatus_t *)
9615 fw_event->event_data;
9616 static struct _raid_device *raid_device;
9617 unsigned long flags;
9620 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9621 (!ioc->hide_ir_msg))
9622 _scsih_sas_ir_operation_status_event_debug(ioc,
9625 /* code added for raid transport support */
9626 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9628 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9629 handle = le16_to_cpu(event_data->VolDevHandle);
9630 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9632 raid_device->percent_complete =
9633 event_data->PercentComplete;
9634 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9639 * _scsih_prep_device_scan - initialize parameters prior to device scan
9640 * @ioc: per adapter object
9642 * Set the deleted flag prior to device scan. If the device is found during
9643 * the scan, then we clear the deleted flag.
9646 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9648 struct MPT3SAS_DEVICE *sas_device_priv_data;
9649 struct scsi_device *sdev;
9651 shost_for_each_device(sdev, ioc->shost) {
9652 sas_device_priv_data = sdev->hostdata;
9653 if (sas_device_priv_data && sas_device_priv_data->sas_target)
9654 sas_device_priv_data->sas_target->deleted = 1;
9659 * _scsih_update_device_qdepth - Update QD during Reset.
9660 * @ioc: per adapter object
9664 _scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
9666 struct MPT3SAS_DEVICE *sas_device_priv_data;
9667 struct MPT3SAS_TARGET *sas_target_priv_data;
9668 struct _sas_device *sas_device;
9669 struct scsi_device *sdev;
9672 ioc_info(ioc, "Update devices with firmware reported queue depth\n");
9673 shost_for_each_device(sdev, ioc->shost) {
9674 sas_device_priv_data = sdev->hostdata;
9675 if (sas_device_priv_data && sas_device_priv_data->sas_target) {
9676 sas_target_priv_data = sas_device_priv_data->sas_target;
9677 sas_device = sas_device_priv_data->sas_target->sas_dev;
9678 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
9679 qdepth = ioc->max_nvme_qd;
9680 else if (sas_device &&
9681 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
9682 qdepth = (sas_device->port_type > 1) ?
9683 ioc->max_wideport_qd : ioc->max_narrowport_qd;
9684 else if (sas_device &&
9685 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
9686 qdepth = ioc->max_sata_qd;
9689 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
9695 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
9696 * @ioc: per adapter object
9697 * @sas_device_pg0: SAS Device page 0
9699 * After host reset, find out whether devices are still responding.
9700 * Used in _scsih_remove_unresponsive_sas_devices.
9703 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
9704 Mpi2SasDevicePage0_t *sas_device_pg0)
9706 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9707 struct scsi_target *starget;
9708 struct _sas_device *sas_device = NULL;
9709 struct _enclosure_node *enclosure_dev = NULL;
9710 unsigned long flags;
9711 struct hba_port *port = mpt3sas_get_port_by_id(
9712 ioc, sas_device_pg0->PhysicalPort, 0);
9714 if (sas_device_pg0->EnclosureHandle) {
9716 mpt3sas_scsih_enclosure_find_by_handle(ioc,
9717 le16_to_cpu(sas_device_pg0->EnclosureHandle));
9718 if (enclosure_dev == NULL)
9719 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
9720 sas_device_pg0->EnclosureHandle);
9722 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9723 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
9724 if (sas_device->sas_address != le64_to_cpu(
9725 sas_device_pg0->SASAddress))
9727 if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
9729 if (sas_device->port != port)
9731 sas_device->responding = 1;
9732 starget = sas_device->starget;
9733 if (starget && starget->hostdata) {
9734 sas_target_priv_data = starget->hostdata;
9735 sas_target_priv_data->tm_busy = 0;
9736 sas_target_priv_data->deleted = 0;
9738 sas_target_priv_data = NULL;
9740 starget_printk(KERN_INFO, starget,
9741 "handle(0x%04x), sas_addr(0x%016llx)\n",
9742 le16_to_cpu(sas_device_pg0->DevHandle),
9743 (unsigned long long)
9744 sas_device->sas_address);
9746 if (sas_device->enclosure_handle != 0)
9747 starget_printk(KERN_INFO, starget,
9748 "enclosure logical id(0x%016llx), slot(%d)\n",
9749 (unsigned long long)
9750 sas_device->enclosure_logical_id,
9753 if (le16_to_cpu(sas_device_pg0->Flags) &
9754 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
9755 sas_device->enclosure_level =
9756 sas_device_pg0->EnclosureLevel;
9757 memcpy(&sas_device->connector_name[0],
9758 &sas_device_pg0->ConnectorName[0], 4);
9760 sas_device->enclosure_level = 0;
9761 sas_device->connector_name[0] = '\0';
9764 sas_device->enclosure_handle =
9765 le16_to_cpu(sas_device_pg0->EnclosureHandle);
9766 sas_device->is_chassis_slot_valid = 0;
9767 if (enclosure_dev) {
9768 sas_device->enclosure_logical_id = le64_to_cpu(
9769 enclosure_dev->pg0.EnclosureLogicalID);
9770 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
9771 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
9772 sas_device->is_chassis_slot_valid = 1;
9773 sas_device->chassis_slot =
9774 enclosure_dev->pg0.ChassisSlot;
9778 if (sas_device->handle == le16_to_cpu(
9779 sas_device_pg0->DevHandle))
9781 pr_info("\thandle changed from(0x%04x)!!!\n",
9782 sas_device->handle);
9783 sas_device->handle = le16_to_cpu(
9784 sas_device_pg0->DevHandle);
9785 if (sas_target_priv_data)
9786 sas_target_priv_data->handle =
9787 le16_to_cpu(sas_device_pg0->DevHandle);
9791 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9795 * _scsih_create_enclosure_list_after_reset - Free Existing list,
9796 * And create enclosure list by scanning all Enclosure Page(0)s
9797 * @ioc: per adapter object
9800 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9802 struct _enclosure_node *enclosure_dev;
9803 Mpi2ConfigReply_t mpi_reply;
9804 u16 enclosure_handle;
9807 /* Free existing enclosure list */
9808 mpt3sas_free_enclosure_list(ioc);
9810 /* Re constructing enclosure list after reset*/
9811 enclosure_handle = 0xFFFF;
9814 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9815 if (!enclosure_dev) {
9816 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9817 __FILE__, __LINE__, __func__);
9820 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9821 &enclosure_dev->pg0,
9822 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
9825 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9826 MPI2_IOCSTATUS_MASK)) {
9827 kfree(enclosure_dev);
9830 list_add_tail(&enclosure_dev->list,
9831 &ioc->enclosure_list);
9833 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
9838 * _scsih_search_responding_sas_devices -
9839 * @ioc: per adapter object
9841 * After host reset, find out whether devices are still responding.
9845 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9847 Mpi2SasDevicePage0_t sas_device_pg0;
9848 Mpi2ConfigReply_t mpi_reply;
9853 ioc_info(ioc, "search for end-devices: start\n");
9855 if (list_empty(&ioc->sas_device_list))
9859 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9860 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9862 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9863 MPI2_IOCSTATUS_MASK;
9864 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9866 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9867 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
9868 if (!(_scsih_is_end_device(device_info)))
9870 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9874 ioc_info(ioc, "search for end-devices: complete\n");
9878 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
9879 * @ioc: per adapter object
9880 * @pcie_device_pg0: PCIe Device page 0
9882 * After host reset, find out whether devices are still responding.
9883 * Used in _scsih_remove_unresponding_devices.
9886 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
9887 Mpi26PCIeDevicePage0_t *pcie_device_pg0)
9889 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9890 struct scsi_target *starget;
9891 struct _pcie_device *pcie_device;
9892 unsigned long flags;
9894 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9895 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
9896 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
9897 && (pcie_device->slot == le16_to_cpu(
9898 pcie_device_pg0->Slot))) {
9899 pcie_device->access_status =
9900 pcie_device_pg0->AccessStatus;
9901 pcie_device->responding = 1;
9902 starget = pcie_device->starget;
9903 if (starget && starget->hostdata) {
9904 sas_target_priv_data = starget->hostdata;
9905 sas_target_priv_data->tm_busy = 0;
9906 sas_target_priv_data->deleted = 0;
9908 sas_target_priv_data = NULL;
9910 starget_printk(KERN_INFO, starget,
9911 "handle(0x%04x), wwid(0x%016llx) ",
9912 pcie_device->handle,
9913 (unsigned long long)pcie_device->wwid);
9914 if (pcie_device->enclosure_handle != 0)
9915 starget_printk(KERN_INFO, starget,
9916 "enclosure logical id(0x%016llx), "
9918 (unsigned long long)
9919 pcie_device->enclosure_logical_id,
9923 if (((le32_to_cpu(pcie_device_pg0->Flags)) &
9924 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
9925 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
9926 pcie_device->enclosure_level =
9927 pcie_device_pg0->EnclosureLevel;
9928 memcpy(&pcie_device->connector_name[0],
9929 &pcie_device_pg0->ConnectorName[0], 4);
9931 pcie_device->enclosure_level = 0;
9932 pcie_device->connector_name[0] = '\0';
9935 if (pcie_device->handle == le16_to_cpu(
9936 pcie_device_pg0->DevHandle))
9938 pr_info("\thandle changed from(0x%04x)!!!\n",
9939 pcie_device->handle);
9940 pcie_device->handle = le16_to_cpu(
9941 pcie_device_pg0->DevHandle);
9942 if (sas_target_priv_data)
9943 sas_target_priv_data->handle =
9944 le16_to_cpu(pcie_device_pg0->DevHandle);
9950 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9954 * _scsih_search_responding_pcie_devices -
9955 * @ioc: per adapter object
9957 * After host reset, find out whether devices are still responding.
9961 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9963 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9964 Mpi2ConfigReply_t mpi_reply;
9969 ioc_info(ioc, "search for end-devices: start\n");
9971 if (list_empty(&ioc->pcie_device_list))
9975 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9976 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9978 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9979 MPI2_IOCSTATUS_MASK;
9980 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9981 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9982 __func__, ioc_status,
9983 le32_to_cpu(mpi_reply.IOCLogInfo));
9986 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9987 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9988 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9990 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
9993 ioc_info(ioc, "search for PCIe end-devices: complete\n");
9997 * _scsih_mark_responding_raid_device - mark a raid_device as responding
9998 * @ioc: per adapter object
9999 * @wwid: world wide identifier for raid volume
10000 * @handle: device handle
10002 * After host reset, find out whether devices are still responding.
10003 * Used in _scsih_remove_unresponsive_raid_devices.
10006 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
10009 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
10010 struct scsi_target *starget;
10011 struct _raid_device *raid_device;
10012 unsigned long flags;
10014 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10015 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
10016 if (raid_device->wwid == wwid && raid_device->starget) {
10017 starget = raid_device->starget;
10018 if (starget && starget->hostdata) {
10019 sas_target_priv_data = starget->hostdata;
10020 sas_target_priv_data->deleted = 0;
10022 sas_target_priv_data = NULL;
10023 raid_device->responding = 1;
10024 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10025 starget_printk(KERN_INFO, raid_device->starget,
10026 "handle(0x%04x), wwid(0x%016llx)\n", handle,
10027 (unsigned long long)raid_device->wwid);
10030 * WARPDRIVE: The handles of the PDs might have changed
10031 * across the host reset so re-initialize the
10032 * required data for Direct IO
10034 mpt3sas_init_warpdrive_properties(ioc, raid_device);
10035 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10036 if (raid_device->handle == handle) {
10037 spin_unlock_irqrestore(&ioc->raid_device_lock,
10041 pr_info("\thandle changed from(0x%04x)!!!\n",
10042 raid_device->handle);
10043 raid_device->handle = handle;
10044 if (sas_target_priv_data)
10045 sas_target_priv_data->handle = handle;
10046 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10050 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10054 * _scsih_search_responding_raid_devices -
10055 * @ioc: per adapter object
10057 * After host reset, find out whether devices are still responding.
10061 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
10063 Mpi2RaidVolPage1_t volume_pg1;
10064 Mpi2RaidVolPage0_t volume_pg0;
10065 Mpi2RaidPhysDiskPage0_t pd_pg0;
10066 Mpi2ConfigReply_t mpi_reply;
10071 if (!ioc->ir_firmware)
10074 ioc_info(ioc, "search for raid volumes: start\n");
10076 if (list_empty(&ioc->raid_device_list))
10080 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10081 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10082 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10083 MPI2_IOCSTATUS_MASK;
10084 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10086 handle = le16_to_cpu(volume_pg1.DevHandle);
10088 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10089 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10090 sizeof(Mpi2RaidVolPage0_t)))
10093 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10094 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10095 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
10096 _scsih_mark_responding_raid_device(ioc,
10097 le64_to_cpu(volume_pg1.WWID), handle);
10100 /* refresh the pd_handles */
10101 if (!ioc->is_warpdrive) {
10102 phys_disk_num = 0xFF;
10103 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
10104 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10105 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10107 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10108 MPI2_IOCSTATUS_MASK;
10109 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10111 phys_disk_num = pd_pg0.PhysDiskNum;
10112 handle = le16_to_cpu(pd_pg0.DevHandle);
10113 set_bit(handle, ioc->pd_handles);
10117 ioc_info(ioc, "search for responding raid volumes: complete\n");
10121 * _scsih_mark_responding_expander - mark a expander as responding
10122 * @ioc: per adapter object
10123 * @expander_pg0:SAS Expander Config Page0
10125 * After host reset, find out whether devices are still responding.
10126 * Used in _scsih_remove_unresponsive_expanders.
10129 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
10130 Mpi2ExpanderPage0_t *expander_pg0)
10132 struct _sas_node *sas_expander = NULL;
10133 unsigned long flags;
10135 struct _enclosure_node *enclosure_dev = NULL;
10136 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
10137 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
10138 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
10139 struct hba_port *port = mpt3sas_get_port_by_id(
10140 ioc, expander_pg0->PhysicalPort, 0);
10142 if (enclosure_handle)
10144 mpt3sas_scsih_enclosure_find_by_handle(ioc,
10147 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10148 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
10149 if (sas_expander->sas_address != sas_address)
10151 if (sas_expander->port != port)
10153 sas_expander->responding = 1;
10155 if (enclosure_dev) {
10156 sas_expander->enclosure_logical_id =
10157 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
10158 sas_expander->enclosure_handle =
10159 le16_to_cpu(expander_pg0->EnclosureHandle);
10162 if (sas_expander->handle == handle)
10164 pr_info("\texpander(0x%016llx): handle changed" \
10165 " from(0x%04x) to (0x%04x)!!!\n",
10166 (unsigned long long)sas_expander->sas_address,
10167 sas_expander->handle, handle);
10168 sas_expander->handle = handle;
10169 for (i = 0 ; i < sas_expander->num_phys ; i++)
10170 sas_expander->phy[i].handle = handle;
10174 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10178 * _scsih_search_responding_expanders -
10179 * @ioc: per adapter object
10181 * After host reset, find out whether devices are still responding.
10185 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10187 Mpi2ExpanderPage0_t expander_pg0;
10188 Mpi2ConfigReply_t mpi_reply;
10194 ioc_info(ioc, "search for expanders: start\n");
10196 if (list_empty(&ioc->sas_expander_list))
10200 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10201 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10203 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10204 MPI2_IOCSTATUS_MASK;
10205 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10208 handle = le16_to_cpu(expander_pg0.DevHandle);
10209 sas_address = le64_to_cpu(expander_pg0.SASAddress);
10210 port = expander_pg0.PhysicalPort;
10212 "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10213 handle, (unsigned long long)sas_address,
10214 (ioc->multipath_on_hba ?
10215 port : MULTIPATH_DISABLED_PORT_ID));
10216 _scsih_mark_responding_expander(ioc, &expander_pg0);
10220 ioc_info(ioc, "search for expanders: complete\n");
10224 * _scsih_remove_unresponding_devices - removing unresponding devices
10225 * @ioc: per adapter object
10228 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
10230 struct _sas_device *sas_device, *sas_device_next;
10231 struct _sas_node *sas_expander, *sas_expander_next;
10232 struct _raid_device *raid_device, *raid_device_next;
10233 struct _pcie_device *pcie_device, *pcie_device_next;
10234 struct list_head tmp_list;
10235 unsigned long flags;
10238 ioc_info(ioc, "removing unresponding devices: start\n");
10240 /* removing unresponding end devices */
10241 ioc_info(ioc, "removing unresponding devices: end-devices\n");
10243 * Iterate, pulling off devices marked as non-responding. We become the
10244 * owner for the reference the list had on any object we prune.
10246 spin_lock_irqsave(&ioc->sas_device_lock, flags);
10249 * Clean up the sas_device_init_list list as
10250 * driver goes for fresh scan as part of diag reset.
10252 list_for_each_entry_safe(sas_device, sas_device_next,
10253 &ioc->sas_device_init_list, list) {
10254 list_del_init(&sas_device->list);
10255 sas_device_put(sas_device);
10258 list_for_each_entry_safe(sas_device, sas_device_next,
10259 &ioc->sas_device_list, list) {
10260 if (!sas_device->responding)
10261 list_move_tail(&sas_device->list, &head);
10263 sas_device->responding = 0;
10265 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10268 * Now, uninitialize and remove the unresponding devices we pruned.
10270 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
10271 _scsih_remove_device(ioc, sas_device);
10272 list_del_init(&sas_device->list);
10273 sas_device_put(sas_device);
10276 ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
10277 INIT_LIST_HEAD(&head);
10278 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10280 * Clean up the pcie_device_init_list list as
10281 * driver goes for fresh scan as part of diag reset.
10283 list_for_each_entry_safe(pcie_device, pcie_device_next,
10284 &ioc->pcie_device_init_list, list) {
10285 list_del_init(&pcie_device->list);
10286 pcie_device_put(pcie_device);
10289 list_for_each_entry_safe(pcie_device, pcie_device_next,
10290 &ioc->pcie_device_list, list) {
10291 if (!pcie_device->responding)
10292 list_move_tail(&pcie_device->list, &head);
10294 pcie_device->responding = 0;
10296 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10298 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
10299 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
10300 list_del_init(&pcie_device->list);
10301 pcie_device_put(pcie_device);
10304 /* removing unresponding volumes */
10305 if (ioc->ir_firmware) {
10306 ioc_info(ioc, "removing unresponding devices: volumes\n");
10307 list_for_each_entry_safe(raid_device, raid_device_next,
10308 &ioc->raid_device_list, list) {
10309 if (!raid_device->responding)
10310 _scsih_sas_volume_delete(ioc,
10311 raid_device->handle);
10313 raid_device->responding = 0;
10317 /* removing unresponding expanders */
10318 ioc_info(ioc, "removing unresponding devices: expanders\n");
10319 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10320 INIT_LIST_HEAD(&tmp_list);
10321 list_for_each_entry_safe(sas_expander, sas_expander_next,
10322 &ioc->sas_expander_list, list) {
10323 if (!sas_expander->responding)
10324 list_move_tail(&sas_expander->list, &tmp_list);
10326 sas_expander->responding = 0;
10328 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10329 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
10331 _scsih_expander_node_remove(ioc, sas_expander);
10334 ioc_info(ioc, "removing unresponding devices: complete\n");
10336 /* unblock devices */
10337 _scsih_ublock_io_all_device(ioc);
10341 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10342 struct _sas_node *sas_expander, u16 handle)
10344 Mpi2ExpanderPage1_t expander_pg1;
10345 Mpi2ConfigReply_t mpi_reply;
10348 for (i = 0 ; i < sas_expander->num_phys ; i++) {
10349 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10350 &expander_pg1, i, handle))) {
10351 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10352 __FILE__, __LINE__, __func__);
10356 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10357 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10358 expander_pg1.NegotiatedLinkRate >> 4,
10359 sas_expander->port);
10364 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
10365 * @ioc: per adapter object
10368 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10370 Mpi2ExpanderPage0_t expander_pg0;
10371 Mpi2SasDevicePage0_t sas_device_pg0;
10372 Mpi26PCIeDevicePage0_t pcie_device_pg0;
10373 Mpi2RaidVolPage1_t volume_pg1;
10374 Mpi2RaidVolPage0_t volume_pg0;
10375 Mpi2RaidPhysDiskPage0_t pd_pg0;
10376 Mpi2EventIrConfigElement_t element;
10377 Mpi2ConfigReply_t mpi_reply;
10378 u8 phys_disk_num, port_id;
10380 u16 handle, parent_handle;
10382 struct _sas_device *sas_device;
10383 struct _pcie_device *pcie_device;
10384 struct _sas_node *expander_device;
10385 static struct _raid_device *raid_device;
10387 unsigned long flags;
10389 ioc_info(ioc, "scan devices: start\n");
10391 _scsih_sas_host_refresh(ioc);
10393 ioc_info(ioc, "\tscan devices: expanders start\n");
10397 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10398 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10399 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10400 MPI2_IOCSTATUS_MASK;
10401 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10402 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10403 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10406 handle = le16_to_cpu(expander_pg0.DevHandle);
10407 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10408 port_id = expander_pg0.PhysicalPort;
10409 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10410 ioc, le64_to_cpu(expander_pg0.SASAddress),
10411 mpt3sas_get_port_by_id(ioc, port_id, 0));
10412 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10413 if (expander_device)
10414 _scsih_refresh_expander_links(ioc, expander_device,
10417 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10419 (u64)le64_to_cpu(expander_pg0.SASAddress));
10420 _scsih_expander_add(ioc, handle);
10421 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10423 (u64)le64_to_cpu(expander_pg0.SASAddress));
10427 ioc_info(ioc, "\tscan devices: expanders complete\n");
10429 if (!ioc->ir_firmware)
10432 ioc_info(ioc, "\tscan devices: phys disk start\n");
10435 phys_disk_num = 0xFF;
10436 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10437 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10439 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10440 MPI2_IOCSTATUS_MASK;
10441 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10442 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10443 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10446 phys_disk_num = pd_pg0.PhysDiskNum;
10447 handle = le16_to_cpu(pd_pg0.DevHandle);
10448 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10450 sas_device_put(sas_device);
10453 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10454 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10457 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10458 MPI2_IOCSTATUS_MASK;
10459 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10460 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10461 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10464 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10465 if (!_scsih_get_sas_address(ioc, parent_handle,
10467 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10469 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10470 port_id = sas_device_pg0.PhysicalPort;
10471 mpt3sas_transport_update_links(ioc, sas_address,
10472 handle, sas_device_pg0.PhyNum,
10473 MPI2_SAS_NEG_LINK_RATE_1_5,
10474 mpt3sas_get_port_by_id(ioc, port_id, 0));
10475 set_bit(handle, ioc->pd_handles);
10477 /* This will retry adding the end device.
10478 * _scsih_add_device() will decide on retries and
10479 * return "1" when it should be retried
10481 while (_scsih_add_device(ioc, handle, retry_count++,
10485 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10487 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10491 ioc_info(ioc, "\tscan devices: phys disk complete\n");
10493 ioc_info(ioc, "\tscan devices: volumes start\n");
10497 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10498 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10499 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10500 MPI2_IOCSTATUS_MASK;
10501 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10502 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10503 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10506 handle = le16_to_cpu(volume_pg1.DevHandle);
10507 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10508 raid_device = _scsih_raid_device_find_by_wwid(ioc,
10509 le64_to_cpu(volume_pg1.WWID));
10510 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10513 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10514 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10515 sizeof(Mpi2RaidVolPage0_t)))
10517 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10518 MPI2_IOCSTATUS_MASK;
10519 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10520 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10521 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10524 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10525 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10526 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
10527 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10528 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10529 element.VolDevHandle = volume_pg1.DevHandle;
10530 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10531 volume_pg1.DevHandle);
10532 _scsih_sas_volume_add(ioc, &element);
10533 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10534 volume_pg1.DevHandle);
10538 ioc_info(ioc, "\tscan devices: volumes complete\n");
10542 ioc_info(ioc, "\tscan devices: end devices start\n");
10546 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10547 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10549 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10550 MPI2_IOCSTATUS_MASK;
10551 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10552 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10553 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10556 handle = le16_to_cpu(sas_device_pg0.DevHandle);
10557 if (!(_scsih_is_end_device(
10558 le32_to_cpu(sas_device_pg0.DeviceInfo))))
10560 port_id = sas_device_pg0.PhysicalPort;
10561 sas_device = mpt3sas_get_sdev_by_addr(ioc,
10562 le64_to_cpu(sas_device_pg0.SASAddress),
10563 mpt3sas_get_port_by_id(ioc, port_id, 0));
10565 sas_device_put(sas_device);
10568 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10569 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10570 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10572 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10573 mpt3sas_transport_update_links(ioc, sas_address, handle,
10574 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10575 mpt3sas_get_port_by_id(ioc, port_id, 0));
10577 /* This will retry adding the end device.
10578 * _scsih_add_device() will decide on retries and
10579 * return "1" when it should be retried
10581 while (_scsih_add_device(ioc, handle, retry_count++,
10585 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10587 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10590 ioc_info(ioc, "\tscan devices: end devices complete\n");
10591 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10595 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10596 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10598 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10599 & MPI2_IOCSTATUS_MASK;
10600 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10601 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10602 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10605 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10606 if (!(_scsih_is_nvme_pciescsi_device(
10607 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10609 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10610 le64_to_cpu(pcie_device_pg0.WWID));
10612 pcie_device_put(pcie_device);
10616 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10617 _scsih_pcie_add_device(ioc, handle);
10619 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10620 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10623 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
10624 ioc_info(ioc, "scan devices: complete\n");
10628 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
10629 * @ioc: per adapter object
10631 * The handler for doing any required cleanup or initialization.
10633 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
10635 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
10639 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
10641 * @ioc: per adapter object
10643 * The handler for doing any required cleanup or initialization.
10646 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
10649 ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
10650 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
10651 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
10652 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
10653 complete(&ioc->scsih_cmds.done);
10655 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
10656 ioc->tm_cmds.status |= MPT3_CMD_RESET;
10657 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
10658 complete(&ioc->tm_cmds.done);
10661 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
10662 memset(ioc->device_remove_in_progress, 0,
10663 ioc->device_remove_in_progress_sz);
10664 _scsih_fw_event_cleanup_queue(ioc);
10665 _scsih_flush_running_cmds(ioc);
10669 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
10670 * @ioc: per adapter object
10672 * The handler for doing any required cleanup or initialization.
10675 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10677 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
10678 if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
10679 if (ioc->multipath_on_hba) {
10680 _scsih_sas_port_refresh(ioc);
10681 _scsih_update_vphys_after_reset(ioc);
10683 _scsih_prep_device_scan(ioc);
10684 _scsih_create_enclosure_list_after_reset(ioc);
10685 _scsih_search_responding_sas_devices(ioc);
10686 _scsih_search_responding_pcie_devices(ioc);
10687 _scsih_search_responding_raid_devices(ioc);
10688 _scsih_search_responding_expanders(ioc);
10689 _scsih_error_recovery_delete_devices(ioc);
10694 * _mpt3sas_fw_work - delayed task for processing firmware events
10695 * @ioc: per adapter object
10696 * @fw_event: The fw_event_work object
10700 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
10702 ioc->current_event = fw_event;
10703 _scsih_fw_event_del_from_list(ioc, fw_event);
10705 /* the queue is being flushed so ignore this event */
10706 if (ioc->remove_host || ioc->pci_error_recovery) {
10707 fw_event_work_put(fw_event);
10708 ioc->current_event = NULL;
10712 switch (fw_event->event) {
10713 case MPT3SAS_PROCESS_TRIGGER_DIAG:
10714 mpt3sas_process_trigger_data(ioc,
10715 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
10716 fw_event->event_data);
10718 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
10719 while (scsi_host_in_recovery(ioc->shost) ||
10720 ioc->shost_recovery) {
10722 * If we're unloading or cancelling the work, bail.
10723 * Otherwise, this can become an infinite loop.
10725 if (ioc->remove_host || ioc->fw_events_cleanup)
10729 _scsih_remove_unresponding_devices(ioc);
10730 _scsih_del_dirty_vphy(ioc);
10731 _scsih_del_dirty_port_entries(ioc);
10732 if (ioc->is_gen35_ioc)
10733 _scsih_update_device_qdepth(ioc);
10734 _scsih_scan_for_devices_after_reset(ioc);
10736 * If diag reset has occurred during the driver load
10737 * then driver has to complete the driver load operation
10738 * by executing the following items:
10739 *- Register the devices from sas_device_init_list to SML
10740 *- clear is_driver_loading flag,
10741 *- start the watchdog thread.
10742 * In happy driver load path, above things are taken care of when
10743 * driver executes scsih_scan_finished().
10745 if (ioc->is_driver_loading)
10746 _scsih_complete_devices_scanning(ioc);
10747 _scsih_set_nvme_max_shutdown_latency(ioc);
10749 case MPT3SAS_PORT_ENABLE_COMPLETE:
10750 ioc->start_scan = 0;
10751 if (missing_delay[0] != -1 && missing_delay[1] != -1)
10752 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
10755 ioc_info(ioc, "port enable: complete from worker thread\n"));
10757 case MPT3SAS_TURN_ON_PFA_LED:
10758 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
10760 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10761 _scsih_sas_topology_change_event(ioc, fw_event);
10763 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10764 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
10765 _scsih_sas_device_status_change_event_debug(ioc,
10766 (Mpi2EventDataSasDeviceStatusChange_t *)
10767 fw_event->event_data);
10769 case MPI2_EVENT_SAS_DISCOVERY:
10770 _scsih_sas_discovery_event(ioc, fw_event);
10772 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10773 _scsih_sas_device_discovery_error_event(ioc, fw_event);
10775 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10776 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
10778 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10779 _scsih_sas_enclosure_dev_status_change_event(ioc,
10782 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10783 _scsih_sas_ir_config_change_event(ioc, fw_event);
10785 case MPI2_EVENT_IR_VOLUME:
10786 _scsih_sas_ir_volume_event(ioc, fw_event);
10788 case MPI2_EVENT_IR_PHYSICAL_DISK:
10789 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
10791 case MPI2_EVENT_IR_OPERATION_STATUS:
10792 _scsih_sas_ir_operation_status_event(ioc, fw_event);
10794 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10795 _scsih_pcie_device_status_change_event(ioc, fw_event);
10797 case MPI2_EVENT_PCIE_ENUMERATION:
10798 _scsih_pcie_enumeration_event(ioc, fw_event);
10800 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10801 _scsih_pcie_topology_change_event(ioc, fw_event);
10802 ioc->current_event = NULL;
10806 fw_event_work_put(fw_event);
10807 ioc->current_event = NULL;
10811 * _firmware_event_work
10812 * @work: The fw_event_work object
10815 * wrappers for the work thread handling firmware events
10819 _firmware_event_work(struct work_struct *work)
10821 struct fw_event_work *fw_event = container_of(work,
10822 struct fw_event_work, work);
10824 _mpt3sas_fw_work(fw_event->ioc, fw_event);
10828 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
10829 * @ioc: per adapter object
10830 * @msix_index: MSIX table index supplied by the OS
10831 * @reply: reply message frame(lower 32bit addr)
10832 * Context: interrupt.
10834 * This function merely adds a new work task into ioc->firmware_event_thread.
10835 * The tasks are worked from _firmware_event_work in user context.
10837 * Return: 1 meaning mf should be freed from _base_interrupt
10838 * 0 means the mf is freed from this function.
10841 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
10844 struct fw_event_work *fw_event;
10845 Mpi2EventNotificationReply_t *mpi_reply;
10848 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
10850 /* events turned off due to host reset */
10851 if (ioc->pci_error_recovery)
10854 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
10856 if (unlikely(!mpi_reply)) {
10857 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
10858 __FILE__, __LINE__, __func__);
10862 event = le16_to_cpu(mpi_reply->Event);
10864 if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
10865 mpt3sas_trigger_event(ioc, event, 0);
10869 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10871 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
10872 (Mpi2EventDataSasBroadcastPrimitive_t *)
10873 mpi_reply->EventData;
10875 if (baen_data->Primitive !=
10876 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
10879 if (ioc->broadcast_aen_busy) {
10880 ioc->broadcast_aen_pending++;
10883 ioc->broadcast_aen_busy = 1;
10887 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10888 _scsih_check_topo_delete_events(ioc,
10889 (Mpi2EventDataSasTopologyChangeList_t *)
10890 mpi_reply->EventData);
10892 * No need to add the topology change list
10893 * event to fw event work queue when
10894 * diag reset is going on. Since during diag
10895 * reset driver scan the devices by reading
10896 * sas device page0's not by processing the
10899 if (ioc->shost_recovery)
10902 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10903 _scsih_check_pcie_topo_remove_events(ioc,
10904 (Mpi26EventDataPCIeTopologyChangeList_t *)
10905 mpi_reply->EventData);
10906 if (ioc->shost_recovery)
10909 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10910 _scsih_check_ir_config_unhide_events(ioc,
10911 (Mpi2EventDataIrConfigChangeList_t *)
10912 mpi_reply->EventData);
10914 case MPI2_EVENT_IR_VOLUME:
10915 _scsih_check_volume_delete_events(ioc,
10916 (Mpi2EventDataIrVolume_t *)
10917 mpi_reply->EventData);
10919 case MPI2_EVENT_LOG_ENTRY_ADDED:
10921 Mpi2EventDataLogEntryAdded_t *log_entry;
10924 if (!ioc->is_warpdrive)
10927 log_entry = (Mpi2EventDataLogEntryAdded_t *)
10928 mpi_reply->EventData;
10929 log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);
10931 if (le16_to_cpu(log_entry->LogEntryQualifier)
10932 != MPT2_WARPDRIVE_LOGENTRY)
10935 switch (log_code) {
10936 case MPT2_WARPDRIVE_LC_SSDT:
10937 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10939 case MPT2_WARPDRIVE_LC_SSDLW:
10940 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
10942 case MPT2_WARPDRIVE_LC_SSDLF:
10943 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
10945 case MPT2_WARPDRIVE_LC_BRMF:
10946 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10952 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10953 _scsih_sas_device_status_change_event(ioc,
10954 (Mpi2EventDataSasDeviceStatusChange_t *)
10955 mpi_reply->EventData);
10957 case MPI2_EVENT_IR_OPERATION_STATUS:
10958 case MPI2_EVENT_SAS_DISCOVERY:
10959 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10960 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10961 case MPI2_EVENT_IR_PHYSICAL_DISK:
10962 case MPI2_EVENT_PCIE_ENUMERATION:
10963 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10966 case MPI2_EVENT_TEMP_THRESHOLD:
10967 _scsih_temp_threshold_events(ioc,
10968 (Mpi2EventDataTemperature_t *)
10969 mpi_reply->EventData);
10971 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
10972 ActiveCableEventData =
10973 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
10974 switch (ActiveCableEventData->ReasonCode) {
10975 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
10976 ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
10977 ActiveCableEventData->ReceptacleID);
10978 pr_notice("cannot be powered and devices connected\n");
10979 pr_notice("to this active cable will not be seen\n");
10980 pr_notice("This active cable requires %d mW of power\n",
10982 ActiveCableEventData->ActiveCablePowerRequirement));
10985 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
10986 ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
10987 ActiveCableEventData->ReceptacleID);
10989 "is not running at optimal speed(12 Gb/s rate)\n");
10995 default: /* ignore the rest */
10999 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
11000 fw_event = alloc_fw_event_work(sz);
11002 ioc_err(ioc, "failure at %s:%d/%s()!\n",
11003 __FILE__, __LINE__, __func__);
11007 memcpy(fw_event->event_data, mpi_reply->EventData, sz);
11008 fw_event->ioc = ioc;
11009 fw_event->VF_ID = mpi_reply->VF_ID;
11010 fw_event->VP_ID = mpi_reply->VP_ID;
11011 fw_event->event = event;
11012 _scsih_fw_event_add(ioc, fw_event);
11013 fw_event_work_put(fw_event);
11018 * _scsih_expander_node_remove - removing expander device from list.
11019 * @ioc: per adapter object
11020 * @sas_expander: the sas_device object
11022 * Removing object and freeing associated memory from the
11023 * ioc->sas_expander_list.
11026 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
11027 struct _sas_node *sas_expander)
11029 struct _sas_port *mpt3sas_port, *next;
11030 unsigned long flags;
11033 /* remove sibling ports attached to this expander */
11034 list_for_each_entry_safe(mpt3sas_port, next,
11035 &sas_expander->sas_port_list, port_list) {
11036 if (ioc->shost_recovery)
11038 if (mpt3sas_port->remote_identify.device_type ==
11040 mpt3sas_device_remove_by_sas_address(ioc,
11041 mpt3sas_port->remote_identify.sas_address,
11042 mpt3sas_port->hba_port);
11043 else if (mpt3sas_port->remote_identify.device_type ==
11044 SAS_EDGE_EXPANDER_DEVICE ||
11045 mpt3sas_port->remote_identify.device_type ==
11046 SAS_FANOUT_EXPANDER_DEVICE)
11047 mpt3sas_expander_remove(ioc,
11048 mpt3sas_port->remote_identify.sas_address,
11049 mpt3sas_port->hba_port);
11052 port_id = sas_expander->port->port_id;
11054 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
11055 sas_expander->sas_address_parent, sas_expander->port);
11058 "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
11059 sas_expander->handle, (unsigned long long)
11060 sas_expander->sas_address,
11063 spin_lock_irqsave(&ioc->sas_node_lock, flags);
11064 list_del(&sas_expander->list);
11065 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
11067 kfree(sas_expander->phy);
11068 kfree(sas_expander);
11072 * _scsih_nvme_shutdown - NVMe shutdown notification
11073 * @ioc: per adapter object
11075 * Sending IoUnitControl request with shutdown operation code to alert IOC that
11076 * the host system is shutting down so that IOC can issue NVMe shutdown to
11077 * NVMe drives attached to it.
11080 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
11082 Mpi26IoUnitControlRequest_t *mpi_request;
11083 Mpi26IoUnitControlReply_t *mpi_reply;
11086 /* are there any NVMe devices ? */
11087 if (list_empty(&ioc->pcie_device_list))
11090 mutex_lock(&ioc->scsih_cmds.mutex);
11092 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11093 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11097 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11099 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11102 "%s: failed obtaining a smid\n", __func__);
11103 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11107 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11108 ioc->scsih_cmds.smid = smid;
11109 memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
11110 mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
11111 mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
11113 init_completion(&ioc->scsih_cmds.done);
11114 ioc->put_smid_default(ioc, smid);
11115 /* Wait for max_shutdown_latency seconds */
11117 "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
11118 ioc->max_shutdown_latency);
11119 wait_for_completion_timeout(&ioc->scsih_cmds.done,
11120 ioc->max_shutdown_latency*HZ);
11122 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11123 ioc_err(ioc, "%s: timeout\n", __func__);
11127 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11128 mpi_reply = ioc->scsih_cmds.reply;
11129 ioc_info(ioc, "Io Unit Control shutdown (complete):"
11130 "ioc_status(0x%04x), loginfo(0x%08x)\n",
11131 le16_to_cpu(mpi_reply->IOCStatus),
11132 le32_to_cpu(mpi_reply->IOCLogInfo));
11135 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11136 mutex_unlock(&ioc->scsih_cmds.mutex);
11141 * _scsih_ir_shutdown - IR shutdown notification
11142 * @ioc: per adapter object
11144 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
11145 * the host system is shutting down.
11148 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
11150 Mpi2RaidActionRequest_t *mpi_request;
11151 Mpi2RaidActionReply_t *mpi_reply;
11154 /* is IR firmware build loaded ? */
11155 if (!ioc->ir_firmware)
11158 /* are there any volumes ? */
11159 if (list_empty(&ioc->raid_device_list))
11162 mutex_lock(&ioc->scsih_cmds.mutex);
11164 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11165 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11168 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11170 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11172 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
11173 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11177 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11178 ioc->scsih_cmds.smid = smid;
11179 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
11181 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
11182 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
11184 if (!ioc->hide_ir_msg)
11185 ioc_info(ioc, "IR shutdown (sending)\n");
11186 init_completion(&ioc->scsih_cmds.done);
11187 ioc->put_smid_default(ioc, smid);
11188 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
11190 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11191 ioc_err(ioc, "%s: timeout\n", __func__);
11195 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11196 mpi_reply = ioc->scsih_cmds.reply;
11197 if (!ioc->hide_ir_msg)
11198 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
11199 le16_to_cpu(mpi_reply->IOCStatus),
11200 le32_to_cpu(mpi_reply->IOCLogInfo));
11204 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11205 mutex_unlock(&ioc->scsih_cmds.mutex);
11209 * _scsih_get_shost_and_ioc - get shost and ioc
11210 * and verify whether they are NULL or not
11211 * @pdev: PCI device struct
11212 * @shost: address of scsi host pointer
11213 * @ioc: address of HBA adapter pointer
11215 * Return zero if *shost and *ioc are not NULL otherwise return error number.
11218 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
11219 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11221 *shost = pci_get_drvdata(pdev);
11222 if (*shost == NULL) {
11223 dev_err(&pdev->dev, "pdev's driver data is null\n");
11227 *ioc = shost_priv(*shost);
11228 if (*ioc == NULL) {
11229 dev_err(&pdev->dev, "shost's private data is null\n");
11237 * scsih_remove - detach and remove add host
11238 * @pdev: PCI device struct
11240 * Routine called when unloading the driver.
11242 static void scsih_remove(struct pci_dev *pdev)
11244 struct Scsi_Host *shost;
11245 struct MPT3SAS_ADAPTER *ioc;
11246 struct _sas_port *mpt3sas_port, *next_port;
11247 struct _raid_device *raid_device, *next;
11248 struct MPT3SAS_TARGET *sas_target_priv_data;
11249 struct _pcie_device *pcie_device, *pcienext;
11250 struct workqueue_struct *wq;
11251 unsigned long flags;
11252 Mpi2ConfigReply_t mpi_reply;
11253 struct hba_port *port, *port_next;
11255 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11258 ioc->remove_host = 1;
11260 if (!pci_device_is_present(pdev)) {
11261 mpt3sas_base_pause_mq_polling(ioc);
11262 _scsih_flush_running_cmds(ioc);
11265 _scsih_fw_event_cleanup_queue(ioc);
11267 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11268 wq = ioc->firmware_event_thread;
11269 ioc->firmware_event_thread = NULL;
11270 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11272 destroy_workqueue(wq);
11274 * Copy back the unmodified ioc page1. so that on next driver load,
11275 * current modified changes on ioc page1 won't take effect.
11277 if (ioc->is_aero_ioc)
11278 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11279 &ioc->ioc_pg1_copy);
11280 /* release all the volumes */
11281 _scsih_ir_shutdown(ioc);
11282 mpt3sas_destroy_debugfs(ioc);
11283 sas_remove_host(shost);
11284 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
11286 if (raid_device->starget) {
11287 sas_target_priv_data =
11288 raid_device->starget->hostdata;
11289 sas_target_priv_data->deleted = 1;
11290 scsi_remove_target(&raid_device->starget->dev);
11292 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
11293 raid_device->handle, (u64)raid_device->wwid);
11294 _scsih_raid_device_remove(ioc, raid_device);
11296 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
11298 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
11299 list_del_init(&pcie_device->list);
11300 pcie_device_put(pcie_device);
11303 /* free ports attached to the sas_host */
11304 list_for_each_entry_safe(mpt3sas_port, next_port,
11305 &ioc->sas_hba.sas_port_list, port_list) {
11306 if (mpt3sas_port->remote_identify.device_type ==
11308 mpt3sas_device_remove_by_sas_address(ioc,
11309 mpt3sas_port->remote_identify.sas_address,
11310 mpt3sas_port->hba_port);
11311 else if (mpt3sas_port->remote_identify.device_type ==
11312 SAS_EDGE_EXPANDER_DEVICE ||
11313 mpt3sas_port->remote_identify.device_type ==
11314 SAS_FANOUT_EXPANDER_DEVICE)
11315 mpt3sas_expander_remove(ioc,
11316 mpt3sas_port->remote_identify.sas_address,
11317 mpt3sas_port->hba_port);
11320 list_for_each_entry_safe(port, port_next,
11321 &ioc->port_table_list, list) {
11322 list_del(&port->list);
11326 /* free phys attached to the sas_host */
11327 if (ioc->sas_hba.num_phys) {
11328 kfree(ioc->sas_hba.phy);
11329 ioc->sas_hba.phy = NULL;
11330 ioc->sas_hba.num_phys = 0;
11333 mpt3sas_base_detach(ioc);
11334 mpt3sas_ctl_release(ioc);
11335 spin_lock(&gioc_lock);
11336 list_del(&ioc->list);
11337 spin_unlock(&gioc_lock);
11338 scsi_host_put(shost);
11342 * scsih_shutdown - routine call during system shutdown
11343 * @pdev: PCI device struct
11346 scsih_shutdown(struct pci_dev *pdev)
11348 struct Scsi_Host *shost;
11349 struct MPT3SAS_ADAPTER *ioc;
11350 struct workqueue_struct *wq;
11351 unsigned long flags;
11352 Mpi2ConfigReply_t mpi_reply;
11354 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11357 ioc->remove_host = 1;
11359 if (!pci_device_is_present(pdev)) {
11360 mpt3sas_base_pause_mq_polling(ioc);
11361 _scsih_flush_running_cmds(ioc);
11364 _scsih_fw_event_cleanup_queue(ioc);
11366 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11367 wq = ioc->firmware_event_thread;
11368 ioc->firmware_event_thread = NULL;
11369 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11371 destroy_workqueue(wq);
11373 * Copy back the unmodified ioc page1 so that on next driver load,
11374 * current modified changes on ioc page1 won't take effect.
11376 if (ioc->is_aero_ioc)
11377 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11378 &ioc->ioc_pg1_copy);
11380 _scsih_ir_shutdown(ioc);
11381 _scsih_nvme_shutdown(ioc);
11382 mpt3sas_base_mask_interrupts(ioc);
11383 mpt3sas_base_stop_watchdog(ioc);
11384 ioc->shost_recovery = 1;
11385 mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
11386 ioc->shost_recovery = 0;
11387 mpt3sas_base_free_irq(ioc);
11388 mpt3sas_base_disable_msix(ioc);
11393 * _scsih_probe_boot_devices - reports 1st device
11394 * @ioc: per adapter object
11396 * If specified in bios page 2, this routine reports the 1st
11397 * device scsi-ml or sas transport for persistent boot device
11398 * purposes. Please refer to function _scsih_determine_boot_device()
11401 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
11405 struct _sas_device *sas_device;
11406 struct _raid_device *raid_device;
11407 struct _pcie_device *pcie_device;
11409 u64 sas_address_parent;
11411 unsigned long flags;
11414 struct hba_port *port;
11416 /* no Bios, return immediately */
11417 if (!ioc->bios_pg3.BiosVersion)
11421 if (ioc->req_boot_device.device) {
11422 device = ioc->req_boot_device.device;
11423 channel = ioc->req_boot_device.channel;
11424 } else if (ioc->req_alt_boot_device.device) {
11425 device = ioc->req_alt_boot_device.device;
11426 channel = ioc->req_alt_boot_device.channel;
11427 } else if (ioc->current_boot_device.device) {
11428 device = ioc->current_boot_device.device;
11429 channel = ioc->current_boot_device.channel;
11435 if (channel == RAID_CHANNEL) {
11436 raid_device = device;
11438 * If this boot vd is already registered with SML then
11439 * no need to register it again as part of device scanning
11440 * after diag reset during driver load operation.
11442 if (raid_device->starget)
11444 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11445 raid_device->id, 0);
11447 _scsih_raid_device_remove(ioc, raid_device);
11448 } else if (channel == PCIE_CHANNEL) {
11449 pcie_device = device;
11451 * If this boot NVMe device is already registered with SML then
11452 * no need to register it again as part of device scanning
11453 * after diag reset during driver load operation.
11455 if (pcie_device->starget)
11457 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11458 tid = pcie_device->id;
11459 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
11460 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11461 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
11463 _scsih_pcie_device_remove(ioc, pcie_device);
11465 sas_device = device;
11467 * If this boot sas/sata device is already registered with SML
11468 * then no need to register it again as part of device scanning
11469 * after diag reset during driver load operation.
11471 if (sas_device->starget)
11473 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11474 handle = sas_device->handle;
11475 sas_address_parent = sas_device->sas_address_parent;
11476 sas_address = sas_device->sas_address;
11477 port = sas_device->port;
11478 list_move_tail(&sas_device->list, &ioc->sas_device_list);
11479 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11481 if (ioc->hide_drives)
11487 if (!mpt3sas_transport_port_add(ioc, handle,
11488 sas_address_parent, port)) {
11489 _scsih_sas_device_remove(ioc, sas_device);
11490 } else if (!sas_device->starget) {
11491 if (!ioc->is_driver_loading) {
11492 mpt3sas_transport_port_remove(ioc,
11494 sas_address_parent, port);
11495 _scsih_sas_device_remove(ioc, sas_device);
11502 * _scsih_probe_raid - reporting raid volumes to scsi-ml
11503 * @ioc: per adapter object
11505 * Called during initial loading of the driver.
11508 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11510 struct _raid_device *raid_device, *raid_next;
11513 list_for_each_entry_safe(raid_device, raid_next,
11514 &ioc->raid_device_list, list) {
11515 if (raid_device->starget)
11517 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11518 raid_device->id, 0);
11520 _scsih_raid_device_remove(ioc, raid_device);
11524 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11526 struct _sas_device *sas_device = NULL;
11527 unsigned long flags;
11529 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11530 if (!list_empty(&ioc->sas_device_init_list)) {
11531 sas_device = list_first_entry(&ioc->sas_device_init_list,
11532 struct _sas_device, list);
11533 sas_device_get(sas_device);
11535 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11540 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11541 struct _sas_device *sas_device)
11543 unsigned long flags;
11545 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11548 * Since we dropped the lock during the call to port_add(), we need to
11549 * be careful here that somebody else didn't move or delete this item
11550 * while we were busy with other things.
11552 * If it was on the list, we need a put() for the reference the list
11553 * had. Either way, we need a get() for the destination list.
11555 if (!list_empty(&sas_device->list)) {
11556 list_del_init(&sas_device->list);
11557 sas_device_put(sas_device);
11560 sas_device_get(sas_device);
11561 list_add_tail(&sas_device->list, &ioc->sas_device_list);
11563 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11567 * _scsih_probe_sas - reporting sas devices to sas transport
11568 * @ioc: per adapter object
11570 * Called during initial loading of the driver.
11573 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
11575 struct _sas_device *sas_device;
11577 if (ioc->hide_drives)
11580 while ((sas_device = get_next_sas_device(ioc))) {
11581 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
11582 sas_device->sas_address_parent, sas_device->port)) {
11583 _scsih_sas_device_remove(ioc, sas_device);
11584 sas_device_put(sas_device);
11586 } else if (!sas_device->starget) {
11588 * When asyn scanning is enabled, its not possible to
11589 * remove devices while scanning is turned on due to an
11590 * oops in scsi_sysfs_add_sdev()->add_device()->
11591 * sysfs_addrm_start()
11593 if (!ioc->is_driver_loading) {
11594 mpt3sas_transport_port_remove(ioc,
11595 sas_device->sas_address,
11596 sas_device->sas_address_parent,
11598 _scsih_sas_device_remove(ioc, sas_device);
11599 sas_device_put(sas_device);
11603 sas_device_make_active(ioc, sas_device);
11604 sas_device_put(sas_device);
11609 * get_next_pcie_device - Get the next pcie device
11610 * @ioc: per adapter object
11612 * Get the next pcie device from pcie_device_init_list list.
11614 * Return: pcie device structure if pcie_device_init_list list is not empty
11615 * otherwise returns NULL
11617 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11619 struct _pcie_device *pcie_device = NULL;
11620 unsigned long flags;
11622 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11623 if (!list_empty(&ioc->pcie_device_init_list)) {
11624 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11625 struct _pcie_device, list);
11626 pcie_device_get(pcie_device);
11628 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11630 return pcie_device;
11634 * pcie_device_make_active - Add pcie device to pcie_device_list list
11635 * @ioc: per adapter object
11636 * @pcie_device: pcie device object
11638 * Add the pcie device which has registered with SCSI Transport Later to
11639 * pcie_device_list list
11641 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11642 struct _pcie_device *pcie_device)
11644 unsigned long flags;
11646 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11648 if (!list_empty(&pcie_device->list)) {
11649 list_del_init(&pcie_device->list);
11650 pcie_device_put(pcie_device);
11652 pcie_device_get(pcie_device);
11653 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
11655 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11659 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
11660 * @ioc: per adapter object
11662 * Called during initial loading of the driver.
11665 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
11667 struct _pcie_device *pcie_device;
11670 /* PCIe Device List */
11671 while ((pcie_device = get_next_pcie_device(ioc))) {
11672 if (pcie_device->starget) {
11673 pcie_device_put(pcie_device);
11676 if (pcie_device->access_status ==
11677 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
11678 pcie_device_make_active(ioc, pcie_device);
11679 pcie_device_put(pcie_device);
11682 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
11683 pcie_device->id, 0);
11685 _scsih_pcie_device_remove(ioc, pcie_device);
11686 pcie_device_put(pcie_device);
11688 } else if (!pcie_device->starget) {
11690 * When async scanning is enabled, its not possible to
11691 * remove devices while scanning is turned on due to an
11692 * oops in scsi_sysfs_add_sdev()->add_device()->
11693 * sysfs_addrm_start()
11695 if (!ioc->is_driver_loading) {
11696 /* TODO-- Need to find out whether this condition will
11699 _scsih_pcie_device_remove(ioc, pcie_device);
11700 pcie_device_put(pcie_device);
11704 pcie_device_make_active(ioc, pcie_device);
11705 pcie_device_put(pcie_device);
11710 * _scsih_probe_devices - probing for devices
11711 * @ioc: per adapter object
11713 * Called during initial loading of the driver.
11716 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
11718 u16 volume_mapping_flags;
11720 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
11721 return; /* return when IOC doesn't support initiator mode */
11723 _scsih_probe_boot_devices(ioc);
11725 if (ioc->ir_firmware) {
11726 volume_mapping_flags =
11727 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
11728 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
11729 if (volume_mapping_flags ==
11730 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
11731 _scsih_probe_raid(ioc);
11732 _scsih_probe_sas(ioc);
11734 _scsih_probe_sas(ioc);
11735 _scsih_probe_raid(ioc);
11738 _scsih_probe_sas(ioc);
11739 _scsih_probe_pcie(ioc);
11744 * scsih_scan_start - scsi lld callback for .scan_start
11745 * @shost: SCSI host pointer
11747 * The shost has the ability to discover targets on its own instead
11748 * of scanning the entire bus. In our implemention, we will kick off
11749 * firmware discovery.
11752 scsih_scan_start(struct Scsi_Host *shost)
11754 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11756 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11757 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11758 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11759 mpt3sas_enable_diag_buffer(ioc, 1);
11761 if (disable_discovery > 0)
11764 ioc->start_scan = 1;
11765 rc = mpt3sas_port_enable(ioc);
11768 ioc_info(ioc, "port enable: FAILED\n");
11772 * _scsih_complete_devices_scanning - add the devices to sml and
11773 * complete ioc initialization.
11774 * @ioc: per adapter object
11778 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
11781 if (ioc->wait_for_discovery_to_complete) {
11782 ioc->wait_for_discovery_to_complete = 0;
11783 _scsih_probe_devices(ioc);
11786 mpt3sas_base_start_watchdog(ioc);
11787 ioc->is_driver_loading = 0;
11791 * scsih_scan_finished - scsi lld callback for .scan_finished
11792 * @shost: SCSI host pointer
11793 * @time: elapsed time of the scan in jiffies
11795 * This function will be called periodicallyn until it returns 1 with the
11796 * scsi_host and the elapsed time of the scan in jiffies. In our implemention,
11797 * we wait for firmware discovery to complete, then return 1.
11800 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
11802 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11804 int issue_hard_reset = 0;
11806 if (disable_discovery > 0) {
11807 ioc->is_driver_loading = 0;
11808 ioc->wait_for_discovery_to_complete = 0;
11812 if (time >= (300 * HZ)) {
11813 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11814 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
11815 ioc->is_driver_loading = 0;
11819 if (ioc->start_scan) {
11820 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
11821 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
11822 mpt3sas_print_fault_code(ioc, ioc_state &
11823 MPI2_DOORBELL_DATA_MASK);
11824 issue_hard_reset = 1;
11826 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
11827 MPI2_IOC_STATE_COREDUMP) {
11828 mpt3sas_base_coredump_info(ioc, ioc_state &
11829 MPI2_DOORBELL_DATA_MASK);
11830 mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
11831 issue_hard_reset = 1;
11837 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
11839 "port enable: aborted due to diag reset\n");
11840 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11843 if (ioc->start_scan_failed) {
11844 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
11845 ioc->start_scan_failed);
11846 ioc->is_driver_loading = 0;
11847 ioc->wait_for_discovery_to_complete = 0;
11848 ioc->remove_host = 1;
11852 ioc_info(ioc, "port enable: SUCCESS\n");
11853 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11854 _scsih_complete_devices_scanning(ioc);
11857 if (issue_hard_reset) {
11858 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11859 if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
11860 ioc->is_driver_loading = 0;
11866 * scsih_map_queues - map reply queues with request queues
11867 * @shost: SCSI host pointer
11869 static void scsih_map_queues(struct Scsi_Host *shost)
11871 struct MPT3SAS_ADAPTER *ioc =
11872 (struct MPT3SAS_ADAPTER *)shost->hostdata;
11873 struct blk_mq_queue_map *map;
11874 int i, qoff, offset;
11875 int nr_msix_vectors = ioc->iopoll_q_start_index;
11876 int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
11878 if (shost->nr_hw_queues == 1)
11881 for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
11882 map = &shost->tag_set.map[i];
11883 map->nr_queues = 0;
11885 if (i == HCTX_TYPE_DEFAULT) {
11887 nr_msix_vectors - ioc->high_iops_queues;
11888 offset = ioc->high_iops_queues;
11889 } else if (i == HCTX_TYPE_POLL)
11890 map->nr_queues = iopoll_q_count;
11892 if (!map->nr_queues)
11893 BUG_ON(i == HCTX_TYPE_DEFAULT);
11896 * The poll queue(s) doesn't have an IRQ (and hence IRQ
11897 * affinity), so use the regular blk-mq cpu mapping
11899 map->queue_offset = qoff;
11900 if (i != HCTX_TYPE_POLL)
11901 blk_mq_pci_map_queues(map, ioc->pdev, offset);
11903 blk_mq_map_queues(map);
11905 qoff += map->nr_queues;
11909 /* shost template for SAS 2.0 HBA devices */
11910 static const struct scsi_host_template mpt2sas_driver_template = {
11911 .module = THIS_MODULE,
11912 .name = "Fusion MPT SAS Host",
11913 .proc_name = MPT2SAS_DRIVER_NAME,
11914 .queuecommand = scsih_qcmd,
11915 .target_alloc = scsih_target_alloc,
11916 .slave_alloc = scsih_slave_alloc,
11917 .slave_configure = scsih_slave_configure,
11918 .target_destroy = scsih_target_destroy,
11919 .slave_destroy = scsih_slave_destroy,
11920 .scan_finished = scsih_scan_finished,
11921 .scan_start = scsih_scan_start,
11922 .change_queue_depth = scsih_change_queue_depth,
11923 .eh_abort_handler = scsih_abort,
11924 .eh_device_reset_handler = scsih_dev_reset,
11925 .eh_target_reset_handler = scsih_target_reset,
11926 .eh_host_reset_handler = scsih_host_reset,
11927 .bios_param = scsih_bios_param,
11930 .sg_tablesize = MPT2SAS_SG_DEPTH,
11931 .max_sectors = 32767,
11933 .shost_groups = mpt3sas_host_groups,
11934 .sdev_groups = mpt3sas_dev_groups,
11935 .track_queue_depth = 1,
11936 .cmd_size = sizeof(struct scsiio_tracker),
11939 /* raid transport support for SAS 2.0 HBA devices */
11940 static struct raid_function_template mpt2sas_raid_functions = {
11941 .cookie = &mpt2sas_driver_template,
11942 .is_raid = scsih_is_raid,
11943 .get_resync = scsih_get_resync,
11944 .get_state = scsih_get_state,
11947 /* shost template for SAS 3.0 HBA devices */
11948 static const struct scsi_host_template mpt3sas_driver_template = {
11949 .module = THIS_MODULE,
11950 .name = "Fusion MPT SAS Host",
11951 .proc_name = MPT3SAS_DRIVER_NAME,
11952 .queuecommand = scsih_qcmd,
11953 .target_alloc = scsih_target_alloc,
11954 .slave_alloc = scsih_slave_alloc,
11955 .slave_configure = scsih_slave_configure,
11956 .target_destroy = scsih_target_destroy,
11957 .slave_destroy = scsih_slave_destroy,
11958 .scan_finished = scsih_scan_finished,
11959 .scan_start = scsih_scan_start,
11960 .change_queue_depth = scsih_change_queue_depth,
11961 .eh_abort_handler = scsih_abort,
11962 .eh_device_reset_handler = scsih_dev_reset,
11963 .eh_target_reset_handler = scsih_target_reset,
11964 .eh_host_reset_handler = scsih_host_reset,
11965 .bios_param = scsih_bios_param,
11968 .sg_tablesize = MPT3SAS_SG_DEPTH,
11969 .max_sectors = 32767,
11970 .max_segment_size = 0xffffffff,
11971 .cmd_per_lun = 128,
11972 .shost_groups = mpt3sas_host_groups,
11973 .sdev_groups = mpt3sas_dev_groups,
11974 .track_queue_depth = 1,
11975 .cmd_size = sizeof(struct scsiio_tracker),
11976 .map_queues = scsih_map_queues,
11977 .mq_poll = mpt3sas_blk_mq_poll,
11980 /* raid transport support for SAS 3.0 HBA devices */
11981 static struct raid_function_template mpt3sas_raid_functions = {
11982 .cookie = &mpt3sas_driver_template,
11983 .is_raid = scsih_is_raid,
11984 .get_resync = scsih_get_resync,
11985 .get_state = scsih_get_state,
11989 * _scsih_determine_hba_mpi_version - determine in which MPI version class
11990 * this device belongs to.
11991 * @pdev: PCI device struct
11993 * return MPI2_VERSION for SAS 2.0 HBA devices,
11994 * MPI25_VERSION for SAS 3.0 HBA devices, and
11995 * MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
11998 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
12001 switch (pdev->device) {
12002 case MPI2_MFGPAGE_DEVID_SSS6200:
12003 case MPI2_MFGPAGE_DEVID_SAS2004:
12004 case MPI2_MFGPAGE_DEVID_SAS2008:
12005 case MPI2_MFGPAGE_DEVID_SAS2108_1:
12006 case MPI2_MFGPAGE_DEVID_SAS2108_2:
12007 case MPI2_MFGPAGE_DEVID_SAS2108_3:
12008 case MPI2_MFGPAGE_DEVID_SAS2116_1:
12009 case MPI2_MFGPAGE_DEVID_SAS2116_2:
12010 case MPI2_MFGPAGE_DEVID_SAS2208_1:
12011 case MPI2_MFGPAGE_DEVID_SAS2208_2:
12012 case MPI2_MFGPAGE_DEVID_SAS2208_3:
12013 case MPI2_MFGPAGE_DEVID_SAS2208_4:
12014 case MPI2_MFGPAGE_DEVID_SAS2208_5:
12015 case MPI2_MFGPAGE_DEVID_SAS2208_6:
12016 case MPI2_MFGPAGE_DEVID_SAS2308_1:
12017 case MPI2_MFGPAGE_DEVID_SAS2308_2:
12018 case MPI2_MFGPAGE_DEVID_SAS2308_3:
12019 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
12020 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
12021 return MPI2_VERSION;
12022 case MPI25_MFGPAGE_DEVID_SAS3004:
12023 case MPI25_MFGPAGE_DEVID_SAS3008:
12024 case MPI25_MFGPAGE_DEVID_SAS3108_1:
12025 case MPI25_MFGPAGE_DEVID_SAS3108_2:
12026 case MPI25_MFGPAGE_DEVID_SAS3108_5:
12027 case MPI25_MFGPAGE_DEVID_SAS3108_6:
12028 return MPI25_VERSION;
12029 case MPI26_MFGPAGE_DEVID_SAS3216:
12030 case MPI26_MFGPAGE_DEVID_SAS3224:
12031 case MPI26_MFGPAGE_DEVID_SAS3316_1:
12032 case MPI26_MFGPAGE_DEVID_SAS3316_2:
12033 case MPI26_MFGPAGE_DEVID_SAS3316_3:
12034 case MPI26_MFGPAGE_DEVID_SAS3316_4:
12035 case MPI26_MFGPAGE_DEVID_SAS3324_1:
12036 case MPI26_MFGPAGE_DEVID_SAS3324_2:
12037 case MPI26_MFGPAGE_DEVID_SAS3324_3:
12038 case MPI26_MFGPAGE_DEVID_SAS3324_4:
12039 case MPI26_MFGPAGE_DEVID_SAS3508:
12040 case MPI26_MFGPAGE_DEVID_SAS3508_1:
12041 case MPI26_MFGPAGE_DEVID_SAS3408:
12042 case MPI26_MFGPAGE_DEVID_SAS3516:
12043 case MPI26_MFGPAGE_DEVID_SAS3516_1:
12044 case MPI26_MFGPAGE_DEVID_SAS3416:
12045 case MPI26_MFGPAGE_DEVID_SAS3616:
12046 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
12047 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12048 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
12049 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12050 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12051 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12052 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12053 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12054 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12055 return MPI26_VERSION;
12061 * _scsih_probe - attach and add scsi host
12062 * @pdev: PCI device struct
12063 * @id: pci device id
12065 * Return: 0 success, anything else error.
12068 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
12070 struct MPT3SAS_ADAPTER *ioc;
12071 struct Scsi_Host *shost = NULL;
12073 u16 hba_mpi_version;
12074 int iopoll_q_count = 0;
12076 /* Determine in which MPI version class this pci device belongs */
12077 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
12078 if (hba_mpi_version == 0)
12081 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
12082 * for other generation HBA's return with -ENODEV
12084 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
12087 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
12088 * for other generation HBA's return with -ENODEV
12090 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
12091 || hba_mpi_version == MPI26_VERSION)))
12094 switch (hba_mpi_version) {
12096 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
12097 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
12098 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
12099 shost = scsi_host_alloc(&mpt2sas_driver_template,
12100 sizeof(struct MPT3SAS_ADAPTER));
12103 ioc = shost_priv(shost);
12104 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12105 ioc->hba_mpi_version_belonged = hba_mpi_version;
12106 ioc->id = mpt2_ids++;
12107 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
12108 switch (pdev->device) {
12109 case MPI2_MFGPAGE_DEVID_SSS6200:
12110 ioc->is_warpdrive = 1;
12111 ioc->hide_ir_msg = 1;
12113 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
12114 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
12115 ioc->is_mcpu_endpoint = 1;
12118 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
12122 if (multipath_on_hba == -1 || multipath_on_hba == 0)
12123 ioc->multipath_on_hba = 0;
12125 ioc->multipath_on_hba = 1;
12128 case MPI25_VERSION:
12129 case MPI26_VERSION:
12130 /* Use mpt3sas driver host template for SAS 3.0 HBA's */
12131 shost = scsi_host_alloc(&mpt3sas_driver_template,
12132 sizeof(struct MPT3SAS_ADAPTER));
12135 ioc = shost_priv(shost);
12136 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12137 ioc->hba_mpi_version_belonged = hba_mpi_version;
12138 ioc->id = mpt3_ids++;
12139 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
12140 switch (pdev->device) {
12141 case MPI26_MFGPAGE_DEVID_SAS3508:
12142 case MPI26_MFGPAGE_DEVID_SAS3508_1:
12143 case MPI26_MFGPAGE_DEVID_SAS3408:
12144 case MPI26_MFGPAGE_DEVID_SAS3516:
12145 case MPI26_MFGPAGE_DEVID_SAS3516_1:
12146 case MPI26_MFGPAGE_DEVID_SAS3416:
12147 case MPI26_MFGPAGE_DEVID_SAS3616:
12148 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
12149 ioc->is_gen35_ioc = 1;
12151 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12152 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12153 dev_err(&pdev->dev,
12154 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
12155 pdev->device, pdev->subsystem_vendor,
12156 pdev->subsystem_device);
12158 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12159 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12160 dev_err(&pdev->dev,
12161 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
12162 pdev->device, pdev->subsystem_vendor,
12163 pdev->subsystem_device);
12165 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12166 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12167 dev_info(&pdev->dev,
12168 "HBA is in Configurable Secure mode\n");
12170 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12171 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
12172 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
12175 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
12177 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
12178 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
12179 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
12180 ioc->combined_reply_queue = 1;
12181 if (ioc->is_gen35_ioc)
12182 ioc->combined_reply_index_count =
12183 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
12185 ioc->combined_reply_index_count =
12186 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
12189 switch (ioc->is_gen35_ioc) {
12191 if (multipath_on_hba == -1 || multipath_on_hba == 0)
12192 ioc->multipath_on_hba = 0;
12194 ioc->multipath_on_hba = 1;
12197 if (multipath_on_hba == -1 || multipath_on_hba > 0)
12198 ioc->multipath_on_hba = 1;
12200 ioc->multipath_on_hba = 0;
12211 INIT_LIST_HEAD(&ioc->list);
12212 spin_lock(&gioc_lock);
12213 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
12214 spin_unlock(&gioc_lock);
12215 ioc->shost = shost;
12217 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
12218 ioc->tm_cb_idx = tm_cb_idx;
12219 ioc->ctl_cb_idx = ctl_cb_idx;
12220 ioc->base_cb_idx = base_cb_idx;
12221 ioc->port_enable_cb_idx = port_enable_cb_idx;
12222 ioc->transport_cb_idx = transport_cb_idx;
12223 ioc->scsih_cb_idx = scsih_cb_idx;
12224 ioc->config_cb_idx = config_cb_idx;
12225 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
12226 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
12227 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
12228 ioc->logging_level = logging_level;
12229 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
12230 /* Host waits for minimum of six seconds */
12231 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
12233 * Enable MEMORY MOVE support flag.
12235 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
12236 /* Enable ADDITIONAL QUERY support flag. */
12237 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
12239 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
12241 /* misc semaphores and spin locks */
12242 mutex_init(&ioc->reset_in_progress_mutex);
12243 /* initializing pci_access_mutex lock */
12244 mutex_init(&ioc->pci_access_mutex);
12245 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
12246 spin_lock_init(&ioc->scsi_lookup_lock);
12247 spin_lock_init(&ioc->sas_device_lock);
12248 spin_lock_init(&ioc->sas_node_lock);
12249 spin_lock_init(&ioc->fw_event_lock);
12250 spin_lock_init(&ioc->raid_device_lock);
12251 spin_lock_init(&ioc->pcie_device_lock);
12252 spin_lock_init(&ioc->diag_trigger_lock);
12254 INIT_LIST_HEAD(&ioc->sas_device_list);
12255 INIT_LIST_HEAD(&ioc->sas_device_init_list);
12256 INIT_LIST_HEAD(&ioc->sas_expander_list);
12257 INIT_LIST_HEAD(&ioc->enclosure_list);
12258 INIT_LIST_HEAD(&ioc->pcie_device_list);
12259 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
12260 INIT_LIST_HEAD(&ioc->fw_event_list);
12261 INIT_LIST_HEAD(&ioc->raid_device_list);
12262 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
12263 INIT_LIST_HEAD(&ioc->delayed_tr_list);
12264 INIT_LIST_HEAD(&ioc->delayed_sc_list);
12265 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
12266 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
12267 INIT_LIST_HEAD(&ioc->reply_queue_list);
12268 INIT_LIST_HEAD(&ioc->port_table_list);
12270 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
12272 /* init shost parameters */
12273 shost->max_cmd_len = 32;
12274 shost->max_lun = max_lun;
12275 shost->transportt = mpt3sas_transport_template;
12276 shost->unique_id = ioc->id;
12278 if (ioc->is_mcpu_endpoint) {
12279 /* mCPU MPI support 64K max IO */
12280 shost->max_sectors = 128;
12281 ioc_info(ioc, "The max_sectors value is set to %d\n",
12282 shost->max_sectors);
12284 if (max_sectors != 0xFFFF) {
12285 if (max_sectors < 64) {
12286 shost->max_sectors = 64;
12287 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12289 } else if (max_sectors > 32767) {
12290 shost->max_sectors = 32767;
12291 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
12294 shost->max_sectors = max_sectors & 0xFFFE;
12295 ioc_info(ioc, "The max_sectors value is set to %d\n",
12296 shost->max_sectors);
12300 /* register EEDP capabilities with SCSI layer */
12301 if (prot_mask >= 0)
12302 scsi_host_set_prot(shost, (prot_mask & 0x07));
12304 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12305 | SHOST_DIF_TYPE2_PROTECTION
12306 | SHOST_DIF_TYPE3_PROTECTION);
12308 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
12311 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
12312 "fw_event_%s%d", ioc->driver_name, ioc->id);
12313 ioc->firmware_event_thread = alloc_ordered_workqueue(
12314 ioc->firmware_event_name, 0);
12315 if (!ioc->firmware_event_thread) {
12316 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12317 __FILE__, __LINE__, __func__);
12319 goto out_thread_fail;
12322 shost->host_tagset = 0;
12324 if (ioc->is_gen35_ioc && host_tagset_enable)
12325 shost->host_tagset = 1;
12327 ioc->is_driver_loading = 1;
12328 if ((mpt3sas_base_attach(ioc))) {
12329 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12330 __FILE__, __LINE__, __func__);
12332 goto out_attach_fail;
12335 if (ioc->is_warpdrive) {
12336 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
12337 ioc->hide_drives = 0;
12338 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
12339 ioc->hide_drives = 1;
12341 if (mpt3sas_get_num_volumes(ioc))
12342 ioc->hide_drives = 1;
12344 ioc->hide_drives = 0;
12347 ioc->hide_drives = 0;
12349 shost->nr_hw_queues = 1;
12351 if (shost->host_tagset) {
12352 shost->nr_hw_queues =
12353 ioc->reply_queue_count - ioc->high_iops_queues;
12356 ioc->reply_queue_count - ioc->iopoll_q_start_index;
12358 shost->nr_maps = iopoll_q_count ? 3 : 1;
12360 dev_info(&ioc->pdev->dev,
12361 "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12362 shost->can_queue, shost->nr_hw_queues);
12365 rv = scsi_add_host(shost, &pdev->dev);
12367 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12368 __FILE__, __LINE__, __func__);
12369 goto out_add_shost_fail;
12372 scsi_scan_host(shost);
12373 mpt3sas_setup_debugfs(ioc);
12375 out_add_shost_fail:
12376 mpt3sas_base_detach(ioc);
12378 destroy_workqueue(ioc->firmware_event_thread);
12380 spin_lock(&gioc_lock);
12381 list_del(&ioc->list);
12382 spin_unlock(&gioc_lock);
12383 scsi_host_put(shost);
12388 * scsih_suspend - power management suspend main entry point
12389 * @dev: Device struct
12391 * Return: 0 success, anything else error.
12393 static int __maybe_unused
12394 scsih_suspend(struct device *dev)
12396 struct pci_dev *pdev = to_pci_dev(dev);
12397 struct Scsi_Host *shost;
12398 struct MPT3SAS_ADAPTER *ioc;
12401 rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12405 mpt3sas_base_stop_watchdog(ioc);
12406 scsi_block_requests(shost);
12407 _scsih_nvme_shutdown(ioc);
12408 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
12409 pdev, pci_name(pdev));
12411 mpt3sas_base_free_resources(ioc);
12416 * scsih_resume - power management resume main entry point
12417 * @dev: Device struct
12419 * Return: 0 success, anything else error.
12421 static int __maybe_unused
12422 scsih_resume(struct device *dev)
12424 struct pci_dev *pdev = to_pci_dev(dev);
12425 struct Scsi_Host *shost;
12426 struct MPT3SAS_ADAPTER *ioc;
12427 pci_power_t device_state = pdev->current_state;
12430 r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12434 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
12435 pdev, pci_name(pdev), device_state);
12438 r = mpt3sas_base_map_resources(ioc);
12441 ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
12442 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
12443 scsi_unblock_requests(shost);
12444 mpt3sas_base_start_watchdog(ioc);
12449 * scsih_pci_error_detected - Called when a PCI error is detected.
12450 * @pdev: PCI device struct
12451 * @state: PCI channel state
12453 * Description: Called when a PCI error is detected.
12455 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
12457 static pci_ers_result_t
12458 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
12460 struct Scsi_Host *shost;
12461 struct MPT3SAS_ADAPTER *ioc;
12463 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12464 return PCI_ERS_RESULT_DISCONNECT;
12466 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
12469 case pci_channel_io_normal:
12470 return PCI_ERS_RESULT_CAN_RECOVER;
12471 case pci_channel_io_frozen:
12472 /* Fatal error, prepare for slot reset */
12473 ioc->pci_error_recovery = 1;
12474 scsi_block_requests(ioc->shost);
12475 mpt3sas_base_stop_watchdog(ioc);
12476 mpt3sas_base_free_resources(ioc);
12477 return PCI_ERS_RESULT_NEED_RESET;
12478 case pci_channel_io_perm_failure:
12479 /* Permanent error, prepare for device removal */
12480 ioc->pci_error_recovery = 1;
12481 mpt3sas_base_stop_watchdog(ioc);
12482 mpt3sas_base_pause_mq_polling(ioc);
12483 _scsih_flush_running_cmds(ioc);
12484 return PCI_ERS_RESULT_DISCONNECT;
12486 return PCI_ERS_RESULT_NEED_RESET;
12490 * scsih_pci_slot_reset - Called when PCI slot has been reset.
12491 * @pdev: PCI device struct
12493 * Description: This routine is called by the pci error recovery
12494 * code after the PCI slot has been reset, just before we
12495 * should resume normal operations.
12497 static pci_ers_result_t
12498 scsih_pci_slot_reset(struct pci_dev *pdev)
12500 struct Scsi_Host *shost;
12501 struct MPT3SAS_ADAPTER *ioc;
12504 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12505 return PCI_ERS_RESULT_DISCONNECT;
12507 ioc_info(ioc, "PCI error: slot reset callback!!\n");
12509 ioc->pci_error_recovery = 0;
12511 pci_restore_state(pdev);
12512 rc = mpt3sas_base_map_resources(ioc);
12514 return PCI_ERS_RESULT_DISCONNECT;
12516 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12517 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12519 ioc_warn(ioc, "hard reset: %s\n",
12520 (rc == 0) ? "success" : "failed");
12523 return PCI_ERS_RESULT_RECOVERED;
12525 return PCI_ERS_RESULT_DISCONNECT;
12529 * scsih_pci_resume() - resume normal ops after PCI reset
12530 * @pdev: pointer to PCI device
12532 * Called when the error recovery driver tells us that its
12533 * OK to resume normal operation. Use completion to allow
12534 * halted scsi ops to resume.
12537 scsih_pci_resume(struct pci_dev *pdev)
12539 struct Scsi_Host *shost;
12540 struct MPT3SAS_ADAPTER *ioc;
12542 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12545 ioc_info(ioc, "PCI error: resume callback!!\n");
12547 mpt3sas_base_start_watchdog(ioc);
12548 scsi_unblock_requests(ioc->shost);
12552 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
12553 * @pdev: pointer to PCI device
12555 static pci_ers_result_t
12556 scsih_pci_mmio_enabled(struct pci_dev *pdev)
12558 struct Scsi_Host *shost;
12559 struct MPT3SAS_ADAPTER *ioc;
12561 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12562 return PCI_ERS_RESULT_DISCONNECT;
12564 ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
12566 /* TODO - dump whatever for debugging purposes */
12568 /* This called only if scsih_pci_error_detected returns
12569 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
12570 * works, no need to reset slot.
12572 return PCI_ERS_RESULT_RECOVERED;
12576 * scsih_ncq_prio_supp - Check for NCQ command priority support
12577 * @sdev: scsi device struct
12579 * This is called when a user indicates they would like to enable
12580 * ncq command priorities. This works only on SATA devices.
12582 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
12584 struct scsi_vpd *vpd;
12585 bool ncq_prio_supp = false;
12588 vpd = rcu_dereference(sdev->vpd_pg89);
12589 if (!vpd || vpd->len < 214)
12592 ncq_prio_supp = (vpd->data[213] >> 4) & 1;
12596 return ncq_prio_supp;
12599 * The pci device ids are defined in mpi/mpi2_cnfg.h.
12601 static const struct pci_device_id mpt3sas_pci_table[] = {
12602 /* Spitfire ~ 2004 */
12603 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
12604 PCI_ANY_ID, PCI_ANY_ID },
12605 /* Falcon ~ 2008 */
12606 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
12607 PCI_ANY_ID, PCI_ANY_ID },
12608 /* Liberator ~ 2108 */
12609 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
12610 PCI_ANY_ID, PCI_ANY_ID },
12611 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
12612 PCI_ANY_ID, PCI_ANY_ID },
12613 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
12614 PCI_ANY_ID, PCI_ANY_ID },
12615 /* Meteor ~ 2116 */
12616 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
12617 PCI_ANY_ID, PCI_ANY_ID },
12618 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
12619 PCI_ANY_ID, PCI_ANY_ID },
12620 /* Thunderbolt ~ 2208 */
12621 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12622 PCI_ANY_ID, PCI_ANY_ID },
12623 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12624 PCI_ANY_ID, PCI_ANY_ID },
12625 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12626 PCI_ANY_ID, PCI_ANY_ID },
12627 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12628 PCI_ANY_ID, PCI_ANY_ID },
12629 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12630 PCI_ANY_ID, PCI_ANY_ID },
12631 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12632 PCI_ANY_ID, PCI_ANY_ID },
12633 /* Mustang ~ 2308 */
12634 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12635 PCI_ANY_ID, PCI_ANY_ID },
12636 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12637 PCI_ANY_ID, PCI_ANY_ID },
12638 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12639 PCI_ANY_ID, PCI_ANY_ID },
12640 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
12641 PCI_ANY_ID, PCI_ANY_ID },
12642 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
12643 PCI_ANY_ID, PCI_ANY_ID },
12645 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
12646 PCI_ANY_ID, PCI_ANY_ID },
12647 /* Fury ~ 3004 and 3008 */
12648 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
12649 PCI_ANY_ID, PCI_ANY_ID },
12650 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
12651 PCI_ANY_ID, PCI_ANY_ID },
12652 /* Invader ~ 3108 */
12653 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
12654 PCI_ANY_ID, PCI_ANY_ID },
12655 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
12656 PCI_ANY_ID, PCI_ANY_ID },
12657 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
12658 PCI_ANY_ID, PCI_ANY_ID },
12659 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
12660 PCI_ANY_ID, PCI_ANY_ID },
12661 /* Cutlass ~ 3216 and 3224 */
12662 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
12663 PCI_ANY_ID, PCI_ANY_ID },
12664 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
12665 PCI_ANY_ID, PCI_ANY_ID },
12666 /* Intruder ~ 3316 and 3324 */
12667 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
12668 PCI_ANY_ID, PCI_ANY_ID },
12669 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
12670 PCI_ANY_ID, PCI_ANY_ID },
12671 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
12672 PCI_ANY_ID, PCI_ANY_ID },
12673 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
12674 PCI_ANY_ID, PCI_ANY_ID },
12675 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
12676 PCI_ANY_ID, PCI_ANY_ID },
12677 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
12678 PCI_ANY_ID, PCI_ANY_ID },
12679 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
12680 PCI_ANY_ID, PCI_ANY_ID },
12681 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
12682 PCI_ANY_ID, PCI_ANY_ID },
12683 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
12684 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
12685 PCI_ANY_ID, PCI_ANY_ID },
12686 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
12687 PCI_ANY_ID, PCI_ANY_ID },
12688 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
12689 PCI_ANY_ID, PCI_ANY_ID },
12690 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
12691 PCI_ANY_ID, PCI_ANY_ID },
12692 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
12693 PCI_ANY_ID, PCI_ANY_ID },
12694 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
12695 PCI_ANY_ID, PCI_ANY_ID },
12696 /* Mercator ~ 3616*/
12697 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
12698 PCI_ANY_ID, PCI_ANY_ID },
12700 /* Aero SI 0x00E1 Configurable Secure
12701 * 0x00E2 Hard Secure
12703 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
12704 PCI_ANY_ID, PCI_ANY_ID },
12705 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
12706 PCI_ANY_ID, PCI_ANY_ID },
12709 * Aero SI –> 0x00E0 Invalid, 0x00E3 Tampered
12711 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
12712 PCI_ANY_ID, PCI_ANY_ID },
12713 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
12714 PCI_ANY_ID, PCI_ANY_ID },
12716 /* Atlas PCIe Switch Management Port */
12717 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
12718 PCI_ANY_ID, PCI_ANY_ID },
12720 /* Sea SI 0x00E5 Configurable Secure
12721 * 0x00E6 Hard Secure
12723 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
12724 PCI_ANY_ID, PCI_ANY_ID },
12725 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12726 PCI_ANY_ID, PCI_ANY_ID },
12729 * ATTO Branded ExpressSAS H12xx GT
12731 { MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12732 PCI_ANY_ID, PCI_ANY_ID },
12735 * Sea SI –> 0x00E4 Invalid, 0x00E7 Tampered
12737 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
12738 PCI_ANY_ID, PCI_ANY_ID },
12739 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
12740 PCI_ANY_ID, PCI_ANY_ID },
12742 {0} /* Terminating entry */
12744 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
12746 static struct pci_error_handlers _mpt3sas_err_handler = {
12747 .error_detected = scsih_pci_error_detected,
12748 .mmio_enabled = scsih_pci_mmio_enabled,
12749 .slot_reset = scsih_pci_slot_reset,
12750 .resume = scsih_pci_resume,
12753 static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
12755 static struct pci_driver mpt3sas_driver = {
12756 .name = MPT3SAS_DRIVER_NAME,
12757 .id_table = mpt3sas_pci_table,
12758 .probe = _scsih_probe,
12759 .remove = scsih_remove,
12760 .shutdown = scsih_shutdown,
12761 .err_handler = &_mpt3sas_err_handler,
12762 .driver.pm = &scsih_pm_ops,
12766 * scsih_init - main entry point for this driver.
12768 * Return: 0 success, anything else error.
12776 mpt3sas_base_initialize_callback_handler();
12778 /* queuecommand callback hander */
12779 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
12781 /* task management callback handler */
12782 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
12784 /* base internal commands callback handler */
12785 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
12786 port_enable_cb_idx = mpt3sas_base_register_callback_handler(
12787 mpt3sas_port_enable_done);
12789 /* transport internal commands callback handler */
12790 transport_cb_idx = mpt3sas_base_register_callback_handler(
12791 mpt3sas_transport_done);
12793 /* scsih internal commands callback handler */
12794 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
12796 /* configuration page API internal commands callback handler */
12797 config_cb_idx = mpt3sas_base_register_callback_handler(
12798 mpt3sas_config_done);
12800 /* ctl module callback handler */
12801 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
12803 tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
12804 _scsih_tm_tr_complete);
12806 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
12807 _scsih_tm_volume_tr_complete);
12809 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
12810 _scsih_sas_control_complete);
12812 mpt3sas_init_debugfs();
12817 * scsih_exit - exit point for this driver (when it is a module).
12819 * Return: 0 success, anything else error.
12825 mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
12826 mpt3sas_base_release_callback_handler(tm_cb_idx);
12827 mpt3sas_base_release_callback_handler(base_cb_idx);
12828 mpt3sas_base_release_callback_handler(port_enable_cb_idx);
12829 mpt3sas_base_release_callback_handler(transport_cb_idx);
12830 mpt3sas_base_release_callback_handler(scsih_cb_idx);
12831 mpt3sas_base_release_callback_handler(config_cb_idx);
12832 mpt3sas_base_release_callback_handler(ctl_cb_idx);
12834 mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
12835 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
12836 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
12838 /* raid transport support */
12839 if (hbas_to_enumerate != 1)
12840 raid_class_release(mpt3sas_raid_template);
12841 if (hbas_to_enumerate != 2)
12842 raid_class_release(mpt2sas_raid_template);
12843 sas_release_transport(mpt3sas_transport_template);
12844 mpt3sas_exit_debugfs();
12848 * _mpt3sas_init - main entry point for this driver.
12850 * Return: 0 success, anything else error.
12853 _mpt3sas_init(void)
12857 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12858 MPT3SAS_DRIVER_VERSION);
12860 mpt3sas_transport_template =
12861 sas_attach_transport(&mpt3sas_transport_functions);
12862 if (!mpt3sas_transport_template)
12865 /* No need attach mpt3sas raid functions template
12866 * if hbas_to_enumarate value is one.
12868 if (hbas_to_enumerate != 1) {
12869 mpt3sas_raid_template =
12870 raid_class_attach(&mpt3sas_raid_functions);
12871 if (!mpt3sas_raid_template) {
12872 sas_release_transport(mpt3sas_transport_template);
12877 /* No need to attach mpt2sas raid functions template
12878 * if hbas_to_enumarate value is two
12880 if (hbas_to_enumerate != 2) {
12881 mpt2sas_raid_template =
12882 raid_class_attach(&mpt2sas_raid_functions);
12883 if (!mpt2sas_raid_template) {
12884 sas_release_transport(mpt3sas_transport_template);
12889 error = scsih_init();
12895 mpt3sas_ctl_init(hbas_to_enumerate);
12897 error = pci_register_driver(&mpt3sas_driver);
12899 mpt3sas_ctl_exit(hbas_to_enumerate);
12907 * _mpt3sas_exit - exit point for this driver (when it is a module).
12911 _mpt3sas_exit(void)
12913 pr_info("mpt3sas version %s unloading\n",
12914 MPT3SAS_DRIVER_VERSION);
12916 pci_unregister_driver(&mpt3sas_driver);
12918 mpt3sas_ctl_exit(hbas_to_enumerate);
12923 module_init(_mpt3sas_init);
12924 module_exit(_mpt3sas_exit);