// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
        ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
                                void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
                                  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

struct hisi_sas_internal_abort_data {
        bool rst_ha_timeout; /* reset the HA for timeout */
};

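/*
 * Map an ATA command from the host-to-device FIS to the SATA frame protocol
 * the HW must use: FPDMA for NCQ commands, PIO, DMA or non-data. For
 * ATA_CMD_SET_MAX the features field selects the protocol; unknown commands
 * fall back on the DMA direction of the request.
 */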
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
        switch (fis->command) {
        case ATA_CMD_FPDMA_WRITE:
        case ATA_CMD_FPDMA_READ:
        case ATA_CMD_FPDMA_RECV:
        case ATA_CMD_FPDMA_SEND:
        case ATA_CMD_NCQ_NON_DATA:
                return HISI_SAS_SATA_PROTOCOL_FPDMA;

        case ATA_CMD_DOWNLOAD_MICRO:
        case ATA_CMD_ID_ATA:
        case ATA_CMD_PMP_READ:
        case ATA_CMD_READ_LOG_EXT:
        case ATA_CMD_PIO_READ:
        case ATA_CMD_PIO_READ_EXT:
        case ATA_CMD_PMP_WRITE:
        case ATA_CMD_WRITE_LOG_EXT:
        case ATA_CMD_PIO_WRITE:
        case ATA_CMD_PIO_WRITE_EXT:
                return HISI_SAS_SATA_PROTOCOL_PIO;

        case ATA_CMD_DSM:
        case ATA_CMD_DOWNLOAD_MICRO_DMA:
        case ATA_CMD_PMP_READ_DMA:
        case ATA_CMD_PMP_WRITE_DMA:
        case ATA_CMD_READ:
        case ATA_CMD_READ_EXT:
        case ATA_CMD_READ_LOG_DMA_EXT:
        case ATA_CMD_READ_STREAM_DMA_EXT:
        case ATA_CMD_TRUSTED_RCV_DMA:
        case ATA_CMD_TRUSTED_SND_DMA:
        case ATA_CMD_WRITE:
        case ATA_CMD_WRITE_EXT:
        case ATA_CMD_WRITE_FUA_EXT:
        case ATA_CMD_WRITE_QUEUED:
        case ATA_CMD_WRITE_LOG_DMA_EXT:
        case ATA_CMD_WRITE_STREAM_DMA_EXT:
        case ATA_CMD_ZAC_MGMT_IN:
                return HISI_SAS_SATA_PROTOCOL_DMA;

        case ATA_CMD_CHK_POWER:
        case ATA_CMD_DEV_RESET:
        case ATA_CMD_EDD:
        case ATA_CMD_FLUSH:
        case ATA_CMD_FLUSH_EXT:
        case ATA_CMD_VERIFY:
        case ATA_CMD_VERIFY_EXT:
        case ATA_CMD_SET_FEATURES:
        case ATA_CMD_STANDBY:
        case ATA_CMD_STANDBYNOW1:
        case ATA_CMD_ZAC_MGMT_OUT:
                return HISI_SAS_SATA_PROTOCOL_NONDATA;

        case ATA_CMD_SET_MAX:
                switch (fis->features) {
                case ATA_SET_MAX_PASSWD:
                case ATA_SET_MAX_LOCK:
                        return HISI_SAS_SATA_PROTOCOL_PIO;

                case ATA_SET_MAX_PASSWD_DMA:
                case ATA_SET_MAX_UNLOCK_DMA:
                        return HISI_SAS_SATA_PROTOCOL_DMA;

                default:
                        return HISI_SAS_SATA_PROTOCOL_NONDATA;
                }

        default:
                if (direction == DMA_NONE)
                        return HISI_SAS_SATA_PROTOCOL_NONDATA;
                return HISI_SAS_SATA_PROTOCOL_PIO;
        }
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

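/*
 * Copy the D2H FIS from the slot's status buffer into the libsas ATA task
 * response, so that libata can see the device's completion status.
 */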
void hisi_sas_sata_done(struct sas_task *task,
                        struct hisi_sas_slot *slot)
{
        struct task_status_struct *ts = &task->task_status;
        struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
        struct hisi_sas_status_buffer *status_buf =
                        hisi_sas_status_buf_addr_mem(slot);
        u8 *iu = &status_buf->iu[0];
        struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

        resp->frame_len = sizeof(struct dev_to_host_fis);
        memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

        ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
        u8 rate = 0;
        int i;

        max -= SAS_LINK_RATE_1_5_GBPS;
        for (i = 0; i <= max; i++)
                rate |= 1 << (i * 2);
        return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
        return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
        return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
        int phy_no;

        for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
                hisi_sas_phy_enable(hisi_hba, phy_no, 0);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
        void *bitmap = hisi_hba->slot_index_tags;

        __clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
        if (hisi_hba->hw->slot_index_alloc ||
            slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
                spin_lock(&hisi_hba->lock);
                hisi_sas_slot_index_clear(hisi_hba, slot_idx);
                spin_unlock(&hisi_hba->lock);
        }
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
        void *bitmap = hisi_hba->slot_index_tags;

        __set_bit(slot_idx, bitmap);
}

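/*
 * Allocate an IPTT for a command. SCSI commands reuse the block layer tag
 * directly; internal commands (no scsi_cmnd) take a slot from the reserved
 * region of the bitmap, searching from just past the last slot allocated.
 */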
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
                                     struct scsi_cmnd *scsi_cmnd)
{
        int index;
        void *bitmap = hisi_hba->slot_index_tags;

        if (scsi_cmnd)
                return scsi_cmd_to_rq(scsi_cmnd)->tag;

        spin_lock(&hisi_hba->lock);
        index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
                                   hisi_hba->last_slot_index + 1);
        if (index >= hisi_hba->slot_index_count) {
                index = find_next_zero_bit(bitmap,
                                           hisi_hba->slot_index_count,
                                           HISI_SAS_UNRESERVED_IPTT);
                if (index >= hisi_hba->slot_index_count) {
                        spin_unlock(&hisi_hba->lock);
                        return -SAS_QUEUE_FULL;
                }
        }
        hisi_sas_slot_index_set(hisi_hba, index);
        hisi_hba->last_slot_index = index;
        spin_unlock(&hisi_hba->lock);

        return index;
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
                             struct hisi_sas_slot *slot)
{
        int device_id = slot->device_id;
        struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

        if (task) {
                struct device *dev = hisi_hba->dev;

                if (!task->lldd_task)
                        return;

                task->lldd_task = NULL;

                if (!sas_protocol_ata(task->task_proto)) {
                        if (slot->n_elem)
                                dma_unmap_sg(dev, task->scatter,
                                             task->num_scatter,
                                             task->data_dir);
                        if (slot->n_elem_dif) {
                                struct sas_ssp_task *ssp_task = &task->ssp_task;
                                struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

                                dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
                                             scsi_prot_sg_count(scsi_cmnd),
                                             task->data_dir);
                        }
                }
        }

        spin_lock(&sas_dev->lock);
        list_del_init(&slot->entry);
        spin_unlock(&sas_dev->lock);

        memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

        hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
                                   struct hisi_sas_slot *slot)
{
        hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
                                   struct hisi_sas_slot *slot)
{
        hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
                                   struct hisi_sas_slot *slot)
{
        hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
                                     struct hisi_sas_slot *slot)
{
        hisi_hba->hw->prep_abort(hisi_hba, slot);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
                               struct sas_task *task, int n_elem,
                               int n_elem_req)
{
        struct device *dev = hisi_hba->dev;

        if (!sas_protocol_ata(task->task_proto)) {
                if (task->num_scatter) {
                        if (n_elem)
                                dma_unmap_sg(dev, task->scatter,
                                             task->num_scatter,
                                             task->data_dir);
                } else if (task->task_proto & SAS_PROTOCOL_SMP) {
                        if (n_elem_req)
                                dma_unmap_sg(dev, &task->smp_task.smp_req,
                                             1, DMA_TO_DEVICE);
                }
        }
}

static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
                            struct sas_task *task, int *n_elem,
                            int *n_elem_req)
{
        struct device *dev = hisi_hba->dev;
        int rc = -EINVAL;

        if (sas_protocol_ata(task->task_proto)) {
                *n_elem = task->num_scatter;
        } else {
                unsigned int req_len;

                if (task->num_scatter) {
                        *n_elem = dma_map_sg(dev, task->scatter,
                                             task->num_scatter, task->data_dir);
                        if (!*n_elem) {
                                rc = -ENOMEM;
                                goto prep_out;
                        }
                } else if (task->task_proto & SAS_PROTOCOL_SMP) {
                        *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
                                                 1, DMA_TO_DEVICE);
                        if (!*n_elem_req) {
                                rc = -ENOMEM;
                                goto prep_out;
                        }
                        req_len = sg_dma_len(&task->smp_task.smp_req);
                        if (req_len & 0x3) {
                                rc = -EINVAL;
                                goto err_out_dma_unmap;
                        }
                }
        }

        if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
                dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
                        *n_elem);
                rc = -EINVAL;
                goto err_out_dma_unmap;
        }
        return 0;

err_out_dma_unmap:
        /* It would be better to call dma_unmap_sg() here, but it's messy */
        hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
                           *n_elem_req);
prep_out:
        return rc;
}

static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
                                   struct sas_task *task, int n_elem_dif)
{
        struct device *dev = hisi_hba->dev;

        if (n_elem_dif) {
                struct sas_ssp_task *ssp_task = &task->ssp_task;
                struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

                dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
                             scsi_prot_sg_count(scsi_cmnd),
                             task->data_dir);
        }
}

static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
                                int *n_elem_dif, struct sas_task *task)
{
        struct device *dev = hisi_hba->dev;
        struct sas_ssp_task *ssp_task;
        struct scsi_cmnd *scsi_cmnd;
        int rc;

        if (task->num_scatter) {
                ssp_task = &task->ssp_task;
                scsi_cmnd = ssp_task->cmd;

                if (scsi_prot_sg_count(scsi_cmnd)) {
                        *n_elem_dif = dma_map_sg(dev,
                                                 scsi_prot_sglist(scsi_cmnd),
                                                 scsi_prot_sg_count(scsi_cmnd),
                                                 task->data_dir);
                        if (!*n_elem_dif)
                                return -ENOMEM;

                        if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
                                dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
                                        *n_elem_dif);
                                rc = -EINVAL;
                                goto err_out_dif_dma_unmap;
                        }
                }
        }

        return 0;

err_out_dif_dma_unmap:
        dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
                     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
        return rc;
}

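/*
 * Stage a slot for delivery: record its delivery-queue position, initialise
 * the command header and tables, build the protocol-specific command, and
 * only then mark the slot ready and kick the HW delivery queue.
 */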
void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
                           struct hisi_sas_slot *slot,
                           struct hisi_sas_dq *dq,
                           struct hisi_sas_device *sas_dev)
{
        struct hisi_sas_cmd_hdr *cmd_hdr_base;
        int dlvry_queue_slot, dlvry_queue;
        struct sas_task *task = slot->task;
        int wr_q_index;

        spin_lock(&dq->lock);
        wr_q_index = dq->wr_point;
        dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
        list_add_tail(&slot->delivery, &dq->list);
        spin_unlock(&dq->lock);
        spin_lock(&sas_dev->lock);
        list_add_tail(&slot->entry, &sas_dev->list);
        spin_unlock(&sas_dev->lock);

        dlvry_queue = dq->id;
        dlvry_queue_slot = wr_q_index;

        slot->device_id = sas_dev->device_id;
        slot->dlvry_queue = dlvry_queue;
        slot->dlvry_queue_slot = dlvry_queue_slot;
        cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
        slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];

        task->lldd_task = slot;

        memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
        memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
        memset(hisi_sas_status_buf_addr_mem(slot), 0,
               sizeof(struct hisi_sas_err_record));

        switch (task->task_proto) {
        case SAS_PROTOCOL_SMP:
                hisi_sas_task_prep_smp(hisi_hba, slot);
                break;
        case SAS_PROTOCOL_SSP:
                hisi_sas_task_prep_ssp(hisi_hba, slot);
                break;
        case SAS_PROTOCOL_SATA:
        case SAS_PROTOCOL_STP:
        case SAS_PROTOCOL_STP_ALL:
                hisi_sas_task_prep_ata(hisi_hba, slot);
                break;
        case SAS_PROTOCOL_INTERNAL_ABORT:
                hisi_sas_task_prep_abort(hisi_hba, slot);
                break;
        default:
                return;
        }

        /* Make slot memories observable before marking as ready */
        smp_wmb();
        WRITE_ONCE(slot->ready, 1);

        spin_lock(&dq->lock);
        hisi_hba->hw->start_delivery(dq);
        spin_unlock(&dq->lock);
}

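/*
 * Main libsas execution callback: validate the port and device, map DMA for
 * the request, allocate an IPTT and slot, then deliver the command. SCSI
 * commands are issued on the delivery queue matching their block layer hw
 * queue; internal aborts use the queue recorded in the abort task.
 */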
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
        int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
        struct domain_device *device = task->dev;
        struct asd_sas_port *sas_port = device->port;
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        bool internal_abort = sas_is_internal_abort(task);
        struct scsi_cmnd *scmd = NULL;
        struct hisi_sas_dq *dq = NULL;
        struct hisi_sas_port *port;
        struct hisi_hba *hisi_hba;
        struct hisi_sas_slot *slot;
        struct device *dev;
        int rc;

        if (!sas_port) {
                struct task_status_struct *ts = &task->task_status;

                ts->resp = SAS_TASK_UNDELIVERED;
                ts->stat = SAS_PHY_DOWN;
                /*
                 * libsas will use dev->port, should
                 * not call task_done for sata
                 */
                if (device->dev_type != SAS_SATA_DEV && !internal_abort)
                        task->task_done(task);
                return -ECOMM;
        }

        hisi_hba = dev_to_hisi_hba(device);
        dev = hisi_hba->dev;

        switch (task->task_proto) {
        case SAS_PROTOCOL_SSP:
        case SAS_PROTOCOL_SMP:
        case SAS_PROTOCOL_SATA:
        case SAS_PROTOCOL_STP:
        case SAS_PROTOCOL_STP_ALL:
                if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
                        if (!gfpflags_allow_blocking(gfp_flags))
                                return -EINVAL;

                        down(&hisi_hba->sem);
                        up(&hisi_hba->sem);
                }

                if (DEV_IS_GONE(sas_dev)) {
                        if (sas_dev)
                                dev_info(dev, "task prep: device %d not ready\n",
                                         sas_dev->device_id);
                        else
                                dev_info(dev, "task prep: device %016llx not ready\n",
                                         SAS_ADDR(device->sas_addr));

                        return -ECOMM;
                }

                port = to_hisi_sas_port(sas_port);
                if (!port->port_attached) {
                        dev_info(dev, "task prep: %s port%d not attach device\n",
                                 dev_is_sata(device) ? "SATA/STP" : "SAS",
                                 port->id);

                        return -ECOMM;
                }

                if (task->uldd_task) {
                        struct ata_queued_cmd *qc;

                        if (dev_is_sata(device)) {
                                qc = task->uldd_task;
                                scmd = qc->scsicmd;
                        } else {
                                scmd = task->uldd_task;
                        }
                }

                if (scmd) {
                        unsigned int dq_index;
                        u32 blk_tag;

                        blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
                        dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
                        dq = &hisi_hba->dq[dq_index];
                } else {
                        struct Scsi_Host *shost = hisi_hba->shost;
                        struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
                        int queue = qmap->mq_map[raw_smp_processor_id()];

                        dq = &hisi_hba->dq[queue];
                }
                break;
        case SAS_PROTOCOL_INTERNAL_ABORT:
                if (!hisi_hba->hw->prep_abort)
                        return TMF_RESP_FUNC_FAILED;

                if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
                        return -EIO;

                hisi_hba = dev_to_hisi_hba(device);

                if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
                        return -EINVAL;

                port = to_hisi_sas_port(sas_port);
                dq = &hisi_hba->dq[task->abort_task.qid];
                break;
        default:
                dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
                        task->task_proto);
                return -EINVAL;
        }

        rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
                              &n_elem_req);
        if (rc < 0)
                goto prep_out;

        if (!sas_protocol_ata(task->task_proto)) {
                rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
                if (rc < 0)
                        goto err_out_dma_unmap;
        }

        if (!internal_abort && hisi_hba->hw->slot_index_alloc)
                rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
        else
                rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);
        if (rc < 0)
                goto err_out_dif_dma_unmap;

        slot = &hisi_hba->slot_info[rc];
        slot->n_elem = n_elem;
        slot->n_elem_dif = n_elem_dif;
        slot->task = task;
        slot->port = port;

        slot->tmf = task->tmf;
        slot->is_internal = !!task->tmf || internal_abort;

        /* protect task_prep and start_delivery sequence */
        hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev);

        return 0;

err_out_dif_dma_unmap:
        if (!sas_protocol_ata(task->task_proto))
                hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
        hisi_sas_dma_unmap(hisi_hba, task, n_elem,
                           n_elem_req);
prep_out:
        dev_err(dev, "task exec: failed[%d]!\n", rc);
        return rc;
}

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
                                 gfp_t gfp_flags)
{
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;

        if (!phy->phy_attached)
                return;

        sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);

        if (sas_phy->phy) {
                struct sas_phy *sphy = sas_phy->phy;

                sphy->negotiated_linkrate = sas_phy->linkrate;
                sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
                sphy->maximum_linkrate_hw =
                        hisi_hba->hw->phy_get_max_linkrate();
                if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
                        sphy->minimum_linkrate = phy->minimum_linkrate;

                if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
                        sphy->maximum_linkrate = phy->maximum_linkrate;
        }

        if (phy->phy_type & PORT_TYPE_SAS) {
                struct sas_identify_frame *id;

                id = (struct sas_identify_frame *)phy->frame_rcvd;
                id->dev_type = phy->identify.device_type;
                id->initiator_bits = SAS_PROTOCOL_ALL;
                id->target_bits = phy->identify.target_port_protocols;
        } else if (phy->phy_type & PORT_TYPE_SATA) {
                /* Nothing */
        }

        sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
        sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct hisi_sas_device *sas_dev = NULL;
        int last = hisi_hba->last_dev_id;
        int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
        int i;

        spin_lock(&hisi_hba->lock);
        for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
                if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
                        int queue = i % hisi_hba->queue_count;
                        struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

                        hisi_hba->devices[i].device_id = i;
                        sas_dev = &hisi_hba->devices[i];
                        sas_dev->dev_status = HISI_SAS_DEV_INIT;
                        sas_dev->dev_type = device->dev_type;
                        sas_dev->hisi_hba = hisi_hba;
                        sas_dev->sas_device = device;
                        sas_dev->dq = dq;
                        spin_lock_init(&sas_dev->lock);
                        INIT_LIST_HEAD(&hisi_hba->devices[i].list);
                        break;
                }
                i++;
        }
        hisi_hba->last_dev_id = i;
        spin_unlock(&hisi_hba->lock);

        return sas_dev;
}

static void hisi_sas_tmf_aborted(struct sas_task *task)
{
        struct hisi_sas_slot *slot = task->lldd_task;
        struct domain_device *device = task->dev;
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_hba *hisi_hba = sas_dev->hisi_hba;

        if (slot) {
                struct hisi_sas_cq *cq =
                        &hisi_hba->cq[slot->dlvry_queue];
                /*
                 * sync irq to avoid freeing task
                 * before using task in IO completion
                 */
                synchronize_irq(cq->irq_no);
                slot->task = NULL;
        }
}

#define HISI_SAS_DISK_RECOVER_CNT 3
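/*
 * Bring a newly attached device to a known state: clear outstanding commands
 * for SAS end devices, or softreset SATA devices, retrying up to
 * HISI_SAS_DISK_RECOVER_CNT times.
 */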
static int hisi_sas_init_device(struct domain_device *device)
{
        int rc = TMF_RESP_FUNC_COMPLETE;
        struct scsi_lun lun;
        int retry = HISI_SAS_DISK_RECOVER_CNT;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);

        switch (device->dev_type) {
        case SAS_END_DEVICE:
                int_to_scsilun(0, &lun);

                while (retry-- > 0) {
                        rc = sas_clear_task_set(device, lun.scsi_lun);
                        if (rc == TMF_RESP_FUNC_COMPLETE) {
                                hisi_sas_release_task(hisi_hba, device);
                                break;
                        }
                }
                break;
        case SAS_SATA_DEV:
        case SAS_SATA_PM:
        case SAS_SATA_PM_PORT:
        case SAS_SATA_PENDING:
                /*
                 * If an expander is swapped when a SATA disk is attached then
                 * we should issue a hard reset to clear previous affiliation
                 * of STP target port, see SPL (chapter 6.19.4).
                 *
                 * However we don't need to issue a hard reset here for these
                 * reasons:
                 * a. When probing the device, libsas/libata already issues a
                 * hard reset in sas_probe_sata() -> ata_sas_async_probe().
                 * Note that in hisi_sas_debug_I_T_nexus_reset() we take care
                 * to issue a hard reset by checking the dev status (== INIT).
                 * b. When resetting the controller, this is simply unnecessary.
                 */
                while (retry-- > 0) {
                        rc = hisi_sas_softreset_ata_disk(device);
                        if (!rc)
                                break;
                }
                break;
        default:
                break;
        }

        return rc;
}

int hisi_sas_slave_alloc(struct scsi_device *sdev)
{
        struct domain_device *ddev = sdev_to_domain_dev(sdev);
        struct hisi_sas_device *sas_dev = ddev->lldd_dev;
        int rc;

        rc = sas_slave_alloc(sdev);
        if (rc)
                return rc;

        rc = hisi_sas_init_device(ddev);
        if (rc)
                return rc;
        sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
        return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);

static int hisi_sas_dev_found(struct domain_device *device)
{
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct domain_device *parent_dev = device->parent;
        struct hisi_sas_device *sas_dev;
        struct device *dev = hisi_hba->dev;
        int rc = -EINVAL;

        if (hisi_hba->hw->alloc_dev)
                sas_dev = hisi_hba->hw->alloc_dev(device);
        else
                sas_dev = hisi_sas_alloc_dev(device);
        if (!sas_dev) {
                dev_err(dev, "fail alloc dev: max support %d devices\n",
                        HISI_SAS_MAX_DEVICES);
                return -EINVAL;
        }

        device->lldd_dev = sas_dev;
        hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

        if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
                int phy_no;
                u8 phy_num = parent_dev->ex_dev.num_phys;
                struct ex_phy *phy;

                for (phy_no = 0; phy_no < phy_num; phy_no++) {
                        phy = &parent_dev->ex_dev.ex_phy[phy_no];
                        if (SAS_ADDR(phy->attached_sas_addr) ==
                            SAS_ADDR(device->sas_addr))
                                break;
                }

                if (phy_no == phy_num) {
                        dev_info(dev, "dev found: no attached "
                                 "dev:%016llx at ex:%016llx\n",
                                 SAS_ADDR(device->sas_addr),
                                 SAS_ADDR(parent_dev->sas_addr));
                        rc = -EINVAL;
                        goto err_out;
                }
        }

        dev_info(dev, "dev[%d:%x] found\n",
                 sas_dev->device_id, sas_dev->dev_type);

        return 0;

err_out:
        hisi_sas_dev_gone(device);
        return rc;
}

int hisi_sas_slave_configure(struct scsi_device *sdev)
{
        struct domain_device *dev = sdev_to_domain_dev(sdev);
        int ret = sas_slave_configure(sdev);

        if (ret)
                return ret;
        if (!dev_is_sata(dev))
                sas_change_queue_depth(sdev, 64);

        return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
        struct hisi_hba *hisi_hba = shost_priv(shost);

        hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
        struct hisi_hba *hisi_hba = shost_priv(shost);
        struct sas_ha_struct *sha = &hisi_hba->sha;

        /* Wait for PHY up interrupt to occur */
        if (time < HZ)
                return 0;

        sas_drain_work(sha);

        return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work_common(struct work_struct *work,
                                       enum hisi_sas_phy_event event)
{
        struct hisi_sas_phy *phy =
                container_of(work, typeof(*phy), works[event]);
        struct hisi_hba *hisi_hba = phy->hisi_hba;
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        int phy_no = sas_phy->id;

        phy->wait_phyup_cnt = 0;
        if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
                hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
        hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
        hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
        struct hisi_sas_phy *phy =
                container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
        struct asd_sas_phy *sas_phy = &phy->sas_phy;

        hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static void hisi_sas_phyup_pm_work(struct work_struct *work)
{
        struct hisi_sas_phy *phy =
                container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
        struct hisi_hba *hisi_hba = phy->hisi_hba;
        struct device *dev = hisi_hba->dev;

        hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
        pm_runtime_put_sync(dev);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
        [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
        [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
        [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
                               enum hisi_sas_phy_event event)
{
        struct hisi_hba *hisi_hba = phy->hisi_hba;

        if (WARN_ON(event >= HISI_PHYES_NUM))
                return false;

        return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
        struct hisi_sas_phy *phy = from_timer(phy, t, timer);
        struct hisi_hba *hisi_hba = phy->hisi_hba;
        struct device *dev = hisi_hba->dev;
        int phy_no = phy->sas_phy.id;

        dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
        hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}

#define HISI_SAS_WAIT_PHYUP_RETRIES 10

void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        struct device *dev = hisi_hba->dev;
        unsigned long flags;

        dev_dbg(dev, "phy%d OOB ready\n", phy_no);
        spin_lock_irqsave(&phy->lock, flags);
        if (phy->phy_attached) {
                spin_unlock_irqrestore(&phy->lock, flags);
                return;
        }

        if (!timer_pending(&phy->timer)) {
                if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
                        phy->wait_phyup_cnt++;
                        phy->timer.expires = jiffies +
                                             HISI_SAS_WAIT_PHYUP_TIMEOUT;
                        add_timer(&phy->timer);
                        spin_unlock_irqrestore(&phy->lock, flags);
                        return;
                }

                dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
                         phy_no, phy->wait_phyup_cnt);
                phy->wait_phyup_cnt = 0;
        }
        spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        int i;

        phy->hisi_hba = hisi_hba;
        phy->port = NULL;
        phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
        phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
        sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
        sas_phy->class = SAS;
        sas_phy->iproto = SAS_PROTOCOL_ALL;
        sas_phy->tproto = 0;
        sas_phy->type = PHY_TYPE_PHYSICAL;
        sas_phy->role = PHY_ROLE_INITIATOR;
        sas_phy->oob_mode = OOB_NOT_CONNECTED;
        sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
        sas_phy->id = phy_no;
        sas_phy->sas_addr = &hisi_hba->sas_addr[0];
        sas_phy->frame_rcvd = &phy->frame_rcvd[0];
        sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
        sas_phy->lldd_phy = phy;

        for (i = 0; i < HISI_PHYES_NUM; i++)
                INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

        spin_lock_init(&phy->lock);

        timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}

/* Wrapper to ensure we track hisi_sas_phy.enable properly */
void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
{
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        struct asd_sas_phy *aphy = &phy->sas_phy;
        struct sas_phy *sphy = aphy->phy;
        unsigned long flags;

        spin_lock_irqsave(&phy->lock, flags);

        if (enable) {
                /* We may have been enabled already; if so, don't touch */
                if (!phy->enable)
                        sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
                hisi_hba->hw->phy_start(hisi_hba, phy_no);
        } else {
                sphy->negotiated_linkrate = SAS_PHY_DISABLED;
                hisi_hba->hw->phy_disable(hisi_hba, phy_no);
        }
        phy->enable = enable;
        spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
        struct sas_ha_struct *sas_ha = sas_phy->ha;
        struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
        struct hisi_sas_phy *phy = sas_phy->lldd_phy;
        struct asd_sas_port *sas_port = sas_phy->port;
        struct hisi_sas_port *port;
        unsigned long flags;

        if (!sas_port)
                return;

        port = to_hisi_sas_port(sas_port);
        spin_lock_irqsave(&hisi_hba->lock, flags);
        port->port_attached = 1;
        port->id = phy->port_id;
        phy->port = port;
        sas_port->lldd_port = port;
        spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
                                     struct hisi_sas_slot *slot)
{
        if (task) {
                unsigned long flags;
                struct task_status_struct *ts;

                ts = &task->task_status;

                ts->resp = SAS_TASK_COMPLETE;
                ts->stat = SAS_ABORTED_TASK;
                spin_lock_irqsave(&task->task_state_lock, flags);
                task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
                if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
                        task->task_state_flags |= SAS_TASK_STATE_DONE;
                spin_unlock_irqrestore(&task->task_state_lock, flags);
        }

        hisi_sas_slot_task_free(hisi_hba, task, slot);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
                                  struct domain_device *device)
{
        struct hisi_sas_slot *slot, *slot2;
        struct hisi_sas_device *sas_dev = device->lldd_dev;

        list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
                hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
        struct hisi_sas_device *sas_dev;
        struct domain_device *device;
        int i;

        for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
                sas_dev = &hisi_hba->devices[i];
                device = sas_dev->sas_device;

                if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
                    !device)
                        continue;

                hisi_sas_release_task(hisi_hba, device);
        }
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
                                  struct domain_device *device)
{
        if (hisi_hba->hw->dereg_device)
                hisi_hba->hw->dereg_device(hisi_hba, device);
}

static int
hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev,
                                 bool rst_ha_timeout)
{
        struct hisi_sas_internal_abort_data data = { rst_ha_timeout };
        struct domain_device *device = sas_dev->sas_device;
        struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
        int rc, i;

        for (i = 0; i < hisi_hba->cq_nvecs; i++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[i];
                const struct cpumask *mask = cq->irq_mask;

                if (mask && !cpumask_intersects(cpu_online_mask, mask))
                        continue;
                rc = sas_execute_internal_abort_dev(device, i, &data);
                if (rc)
                        return rc;
        }

        return 0;
}

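/*
 * Tear down a departed device: abort commands the HW still owns, deregister
 * the device and clear its ITCT entry. The entry is not marked SAS_PHY_UNUSED
 * if clearing the ITCT failed.
 */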
static void hisi_sas_dev_gone(struct domain_device *device)
{
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct device *dev = hisi_hba->dev;
        int ret = 0;

        dev_info(dev, "dev[%d:%x] is gone\n",
                 sas_dev->device_id, sas_dev->dev_type);

        down(&hisi_hba->sem);
        if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
                hisi_sas_internal_task_abort_dev(sas_dev, true);

                hisi_sas_dereg_device(hisi_hba, device);

                ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
                device->lldd_dev = NULL;
        }

        if (hisi_hba->hw->free_device)
                hisi_hba->hw->free_device(sas_dev);

        /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
        if (!ret)
                sas_dev->dev_type = SAS_PHY_UNUSED;
        sas_dev->sas_device = NULL;
        up(&hisi_hba->sem);
}

static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
                                     struct sas_phy_linkrates *r)
{
        struct sas_phy_linkrates _r;

        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        enum sas_linkrate min, max;

        if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
                return -EINVAL;

        if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
                max = sas_phy->phy->maximum_linkrate;
                min = r->minimum_linkrate;
        } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
                max = r->maximum_linkrate;
                min = sas_phy->phy->minimum_linkrate;
        } else
                return -EINVAL;

        _r.maximum_linkrate = max;
        _r.minimum_linkrate = min;

        sas_phy->phy->maximum_linkrate = max;
        sas_phy->phy->minimum_linkrate = min;

        hisi_sas_phy_enable(hisi_hba, phy_no, 0);
        msleep(100);
        hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
        hisi_sas_phy_enable(hisi_hba, phy_no, 1);

        return 0;
}

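/*
 * libsas PHY control callback. For functions which bounce the link on an
 * attached phy, wait for the subsequent phyup (completed by the interrupt
 * path) before returning.
 */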
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
                                void *funcdata)
{
        struct hisi_sas_phy *phy = container_of(sas_phy,
                        struct hisi_sas_phy, sas_phy);
        struct sas_ha_struct *sas_ha = sas_phy->ha;
        struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
        struct device *dev = hisi_hba->dev;
        DECLARE_COMPLETION_ONSTACK(completion);
        int phy_no = sas_phy->id;
        u8 sts = phy->phy_attached;
        int ret = 0;

        down(&hisi_hba->sem);
        phy->reset_completion = &completion;

        switch (func) {
        case PHY_FUNC_HARD_RESET:
                hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
                break;

        case PHY_FUNC_LINK_RESET:
                hisi_sas_phy_enable(hisi_hba, phy_no, 0);
                msleep(100);
                hisi_sas_phy_enable(hisi_hba, phy_no, 1);
                break;

        case PHY_FUNC_DISABLE:
                hisi_sas_phy_enable(hisi_hba, phy_no, 0);
                goto out;

        case PHY_FUNC_SET_LINK_RATE:
                ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
                break;

        case PHY_FUNC_GET_EVENTS:
                if (hisi_hba->hw->get_events) {
                        hisi_hba->hw->get_events(hisi_hba, phy_no);
                        goto out;
                }
                fallthrough;
        case PHY_FUNC_RELEASE_SPINUP_HOLD:
        default:
                ret = -EOPNOTSUPP;
                goto out;
        }

        if (sts && !wait_for_completion_timeout(&completion,
                HISI_SAS_WAIT_PHYUP_TIMEOUT)) {
                dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
                         phy_no, func);
                if (phy->in_reset)
                        ret = -ETIMEDOUT;
        }

out:
        phy->reset_completion = NULL;

        up(&hisi_hba->sem);
        return ret;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
                                        bool reset, int pmp, u8 *fis)
{
        struct ata_taskfile tf;

        ata_tf_init(dev, &tf);
        if (reset)
                tf.ctl |= ATA_SRST;
        else
                tf.ctl &= ~ATA_SRST;
        tf.command = ATA_CMD_DEV_RESET;
        ata_tf_to_fis(&tf, pmp, 0, fis);
}

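/*
 * ATA softreset: send an SRST FIS followed by a de-assert FIS on every link
 * of the ATA port, then release any tasks still queued for the device on
 * success.
 */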
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
        struct ata_port *ap = device->sata_dev.ap;
        struct ata_link *link;
        int rc = TMF_RESP_FUNC_FAILED;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct device *dev = hisi_hba->dev;
        u8 fis[20] = {0};

        ata_for_each_link(link, ap, EDGE) {
                int pmp = sata_srst_pmp(link);

                hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
                rc = sas_execute_ata_cmd(device, fis, -1);
                if (rc != TMF_RESP_FUNC_COMPLETE)
                        break;
        }

        if (rc == TMF_RESP_FUNC_COMPLETE) {
                ata_for_each_link(link, ap, EDGE) {
                        int pmp = sata_srst_pmp(link);

                        hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
                        rc = sas_execute_ata_cmd(device, fis, -1);
                        if (rc != TMF_RESP_FUNC_COMPLETE)
                                dev_err(dev, "ata disk %016llx de-reset failed\n",
                                        SAS_ADDR(device->sas_addr));
                }
        } else {
                dev_err(dev, "ata disk %016llx reset failed\n",
                        SAS_ADDR(device->sas_addr));
        }

        if (rc == TMF_RESP_FUNC_COMPLETE)
                hisi_sas_release_task(hisi_hba, device);

        return rc;
}

static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
        u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
        int i;

        for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
                struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
                struct domain_device *device = sas_dev->sas_device;
                struct asd_sas_port *sas_port;
                struct hisi_sas_port *port;
                struct hisi_sas_phy *phy = NULL;
                struct asd_sas_phy *sas_phy;

                if ((sas_dev->dev_type == SAS_PHY_UNUSED)
                                || !device || !device->port)
                        continue;

                sas_port = device->port;
                port = to_hisi_sas_port(sas_port);

                spin_lock(&sas_port->phy_list_lock);
                list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
                        if (state & BIT(sas_phy->id)) {
                                phy = sas_phy->lldd_phy;
                                break;
                        }
                spin_unlock(&sas_port->phy_list_lock);

                if (phy) {
                        port->id = phy->port_id;

                        /* Update linkrate of directly attached device. */
                        if (!device->parent)
                                device->linkrate = phy->sas_phy.linkrate;

                        hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
                } else
                        port->id = 0xff;
        }
}

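/*
 * After a controller reset, replay PHY state to libsas: broadcast events for
 * expander ports which came back up, and PHY-down events for links which
 * were lost.
 */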
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
        struct asd_sas_port *_sas_port = NULL;
        int phy_no;

        for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
                struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
                struct asd_sas_phy *sas_phy = &phy->sas_phy;
                struct asd_sas_port *sas_port = sas_phy->port;
                bool do_port_check = _sas_port != sas_port;

                if (!sas_phy->phy->enabled)
                        continue;

                /* Report PHY state change to libsas */
                if (state & BIT(phy_no)) {
                        if (do_port_check && sas_port && sas_port->port_dev) {
                                struct domain_device *dev = sas_port->port_dev;

                                _sas_port = sas_port;

                                if (dev_is_expander(dev->dev_type))
                                        sas_notify_port_event(sas_phy,
                                                        PORTE_BROADCAST_RCVD,
                                                        GFP_KERNEL);
                        }
                } else {
                        hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);
                }
        }
}

static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
        struct hisi_sas_device *sas_dev;
        struct domain_device *device;
        int i;

        for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
                sas_dev = &hisi_hba->devices[i];
                device = sas_dev->sas_device;

                if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
                        continue;

                hisi_sas_init_device(device);
        }
}

static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
                                             struct asd_sas_port *sas_port,
                                             struct domain_device *device)
{
        struct ata_port *ap = device->sata_dev.ap;
        struct device *dev = hisi_hba->dev;
        int rc = TMF_RESP_FUNC_FAILED;
        struct ata_link *link;
        u8 fis[20] = {0};
        int i;

        for (i = 0; i < hisi_hba->n_phy; i++) {
                if (!(sas_port->phy_mask & BIT(i)))
                        continue;

                ata_for_each_link(link, ap, EDGE) {
                        int pmp = sata_srst_pmp(link);

                        hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
                        rc = sas_execute_ata_cmd(device, fis, i);
                        if (rc != TMF_RESP_FUNC_COMPLETE) {
                                dev_err(dev, "phy%d ata reset failed rc=%d\n",
                                        i, rc);
                                break;
                        }
                }
        }
}

static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
        struct device *dev = hisi_hba->dev;
        int port_no, rc, i;

        for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
                struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
                struct domain_device *device = sas_dev->sas_device;

                if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
                        continue;

                rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
                if (rc < 0)
                        dev_err(dev, "STP reject: abort dev failed %d\n", rc);
        }

        for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
                struct hisi_sas_port *port = &hisi_hba->port[port_no];
                struct asd_sas_port *sas_port = &port->sas_port;
                struct domain_device *port_dev = sas_port->port_dev;
                struct domain_device *device;

                if (!port_dev || !dev_is_expander(port_dev->dev_type))
                        continue;

                /* Try to find a SATA device */
                list_for_each_entry(device, &sas_port->dev_list,
                                    dev_list_node) {
                        if (dev_is_sata(device)) {
                                hisi_sas_send_ata_reset_each_phy(hisi_hba,
                                                                 sas_port,
                                                                 device);
                                break;
                        }
                }
        }
}

void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
        struct Scsi_Host *shost = hisi_hba->shost;

        hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

        scsi_block_requests(shost);
        hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

        del_timer_sync(&hisi_hba->timer);

        set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
        struct Scsi_Host *shost = hisi_hba->shost;

        /* Init and wait for PHYs to come up and all libsas event finished. */
        hisi_hba->hw->phys_init(hisi_hba);
        msleep(1000);
        hisi_sas_refresh_port_id(hisi_hba);
        clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

        if (hisi_hba->reject_stp_links_msk)
                hisi_sas_terminate_stp_reject(hisi_hba);
        hisi_sas_reset_init_all_devices(hisi_hba);
        scsi_unblock_requests(shost);
        clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
        up(&hisi_hba->sem);

        hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
{
        if (!hisi_hba->hw->soft_reset)
                return -1;

        down(&hisi_hba->sem);
        if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
                up(&hisi_hba->sem);
                return -1;
        }

        if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
                hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);

        return 0;
}

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
        struct device *dev = hisi_hba->dev;
        struct Scsi_Host *shost = hisi_hba->shost;
        int rc;

        dev_info(dev, "controller resetting...\n");
        hisi_sas_controller_reset_prepare(hisi_hba);

        rc = hisi_hba->hw->soft_reset(hisi_hba);
        if (rc) {
                dev_warn(dev, "controller reset failed (%d)\n", rc);
                clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
                up(&hisi_hba->sem);
                scsi_unblock_requests(shost);
                clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
                return rc;
        }

        hisi_sas_controller_reset_done(hisi_hba);
        clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
        dev_info(dev, "controller reset complete\n");

        return 0;
}

static int hisi_sas_abort_task(struct sas_task *task)
{
        struct hisi_sas_internal_abort_data internal_abort_data = { false };
        struct domain_device *device = task->dev;
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_hba *hisi_hba;
        struct device *dev;
        int rc = TMF_RESP_FUNC_FAILED;
        unsigned long flags;

        if (!sas_dev)
                return TMF_RESP_FUNC_FAILED;

        hisi_hba = dev_to_hisi_hba(task->dev);
        dev = hisi_hba->dev;

        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
                struct hisi_sas_slot *slot = task->lldd_task;
                struct hisi_sas_cq *cq;

                if (slot) {
                        /*
                         * sync irq to avoid freeing task
                         * before using task in IO completion
                         */
                        cq = &hisi_hba->cq[slot->dlvry_queue];
                        synchronize_irq(cq->irq_no);
                }
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                rc = TMF_RESP_FUNC_COMPLETE;
                goto out;
        }
        task->task_state_flags |= SAS_TASK_STATE_ABORTED;
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
                struct hisi_sas_slot *slot = task->lldd_task;
                u16 tag = slot->idx;
                int rc2;

                rc = sas_abort_task(task, tag);
                rc2 = sas_execute_internal_abort_single(device, tag,
                                slot->dlvry_queue, &internal_abort_data);
                if (rc2 < 0) {
                        dev_err(dev, "abort task: internal abort (%d)\n", rc2);
                        return TMF_RESP_FUNC_FAILED;
                }

                /*
                 * If the TMF finds that the IO is not in the device and also
                 * the internal abort does not succeed, then it is safe to
                 * free the slot.
                 * Note: if the internal abort succeeds then the slot
                 * will have already been completed
                 */
                if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
                        if (task->lldd_task)
                                hisi_sas_do_release_task(hisi_hba, task, slot);
                }
        } else if (task->task_proto & SAS_PROTOCOL_SATA ||
                   task->task_proto & SAS_PROTOCOL_STP) {
                if (task->dev->dev_type == SAS_SATA_DEV) {
                        rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
                        if (rc < 0) {
                                dev_err(dev, "abort task: internal abort failed\n");
                                goto out;
                        }
                        hisi_sas_dereg_device(hisi_hba, device);
                        rc = hisi_sas_softreset_ata_disk(device);
                }
        } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
                /* SMP */
                struct hisi_sas_slot *slot = task->lldd_task;
                u32 tag = slot->idx;
                struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

                rc = sas_execute_internal_abort_single(device,
                                                       tag, slot->dlvry_queue,
                                                       &internal_abort_data);
                if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
                    task->lldd_task) {
                        /*
                         * sync irq to avoid freeing task
                         * before using task in IO completion
                         */
                        synchronize_irq(cq->irq_no);
                        slot->task = NULL;
                }
        }

out:
        if (rc != TMF_RESP_FUNC_COMPLETE)
                dev_notice(dev, "abort task: rc=%d\n", rc);
        return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct device *dev = hisi_hba->dev;
        int rc;

        rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
        if (rc < 0) {
                dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
                return TMF_RESP_FUNC_FAILED;
        }
        hisi_sas_dereg_device(hisi_hba, device);

        rc = sas_abort_task_set(device, lun);
        if (rc == TMF_RESP_FUNC_COMPLETE)
                hisi_sas_release_task(hisi_hba, device);

        return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
        struct sas_phy *local_phy = sas_get_local_phy(device);
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct sas_ha_struct *sas_ha = &hisi_hba->sha;
        int rc, reset_type;

        if (!local_phy->enabled) {
                sas_put_local_phy(local_phy);
                return -ENODEV;
        }

        if (scsi_is_sas_phy_local(local_phy)) {
                struct asd_sas_phy *sas_phy =
                        sas_ha->sas_phy[local_phy->number];
                struct hisi_sas_phy *phy =
                        container_of(sas_phy, struct hisi_sas_phy, sas_phy);
                unsigned long flags;

                spin_lock_irqsave(&phy->lock, flags);
                phy->in_reset = 1;
                spin_unlock_irqrestore(&phy->lock, flags);
        }

        reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
                      !dev_is_sata(device)) ? true : false;

        rc = sas_phy_reset(local_phy, reset_type);
        sas_put_local_phy(local_phy);

        if (scsi_is_sas_phy_local(local_phy)) {
                struct asd_sas_phy *sas_phy =
                        sas_ha->sas_phy[local_phy->number];
                struct hisi_sas_phy *phy =
                        container_of(sas_phy, struct hisi_sas_phy, sas_phy);
                unsigned long flags;

                spin_lock_irqsave(&phy->lock, flags);
                phy->in_reset = 0;
                spin_unlock_irqrestore(&phy->lock, flags);

                /* report PHY down if timed out */
                if (rc == -ETIMEDOUT)
                        hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
                return rc;
        }

        if (rc)
                return rc;

        /* Remote phy */
        if (dev_is_sata(device)) {
                rc = sas_ata_wait_after_reset(device,
                                              HISI_SAS_WAIT_PHYUP_TIMEOUT);
        } else {
                msleep(2000);
        }

        return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct device *dev = hisi_hba->dev;
        int rc;

        rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
        if (rc < 0) {
                dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
                return TMF_RESP_FUNC_FAILED;
        }
        hisi_sas_dereg_device(hisi_hba, device);

        rc = hisi_sas_debug_I_T_nexus_reset(device);
        if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
                struct sas_phy *local_phy;

                rc = hisi_sas_softreset_ata_disk(device);
                switch (rc) {
                case -ECOMM:
                        rc = -ENODEV;
                        break;
                case TMF_RESP_FUNC_FAILED:
                case -EMSGSIZE:
                case -EIO:
                        local_phy = sas_get_local_phy(device);
                        rc = sas_phy_enable(local_phy, 0);
                        if (!rc) {
                                local_phy->enabled = 0;
                                dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n",
                                        SAS_ADDR(device->sas_addr), rc);
                                rc = -ENODEV;
                        }
                        sas_put_local_phy(local_phy);
                        break;
                default:
                        break;
                }
        }

        if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
                hisi_sas_release_task(hisi_hba, device);

        return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct device *dev = hisi_hba->dev;
        int rc = TMF_RESP_FUNC_FAILED;

        /* Clear internal IO and then lu reset */
        rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
        if (rc < 0) {
                dev_err(dev, "lu_reset: internal abort failed\n");
                goto out;
        }
        hisi_sas_dereg_device(hisi_hba, device);

        if (dev_is_sata(device)) {
                struct sas_phy *phy;

                phy = sas_get_local_phy(device);

                rc = sas_phy_reset(phy, true);

                if (rc == 0)
                        hisi_sas_release_task(hisi_hba, device);
                sas_put_local_phy(phy);
        } else {
                rc = sas_lu_reset(device, lun);
                if (rc == TMF_RESP_FUNC_COMPLETE)
                        hisi_sas_release_task(hisi_hba, device);
        }
out:
        if (rc != TMF_RESP_FUNC_COMPLETE)
                dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
                        sas_dev->device_id, rc);
        return rc;
}

static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie)
{
        struct domain_device *device = data;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        int rc;

        rc = hisi_sas_debug_I_T_nexus_reset(device);
        if (rc != TMF_RESP_FUNC_COMPLETE)
                dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n",
                         SAS_ADDR(device->sas_addr), rc);
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
        struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
        HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
        ASYNC_DOMAIN_EXCLUSIVE(async);
        int i;

        queue_work(hisi_hba->wq, &r.work);
        wait_for_completion(r.completion);
        if (!r.done)
                return TMF_RESP_FUNC_FAILED;

        for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
                struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
                struct domain_device *device = sas_dev->sas_device;

                if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
                    dev_is_expander(device->dev_type))
                        continue;

                async_schedule_domain(hisi_sas_async_I_T_nexus_reset,
                                      device, &async);
        }

        async_synchronize_full_domain(&async);
        hisi_sas_release_tasks(hisi_hba);

        return TMF_RESP_FUNC_COMPLETE;
}

static int hisi_sas_query_task(struct sas_task *task)
{
        int rc = TMF_RESP_FUNC_FAILED;

        if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
                struct hisi_sas_slot *slot = task->lldd_task;
                u32 tag = slot->idx;

                rc = sas_query_task(task, tag);
                switch (rc) {
                /* The task is still in Lun, release it then */
                case TMF_RESP_FUNC_SUCC:
                /* The task is not in Lun or failed, reset the phy */
                case TMF_RESP_FUNC_FAILED:
                case TMF_RESP_FUNC_COMPLETE:
                        break;
                default:
                        rc = TMF_RESP_FUNC_FAILED;
                        break;
                }
        }
        return rc;
}

static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
                                            void *data)
{
        struct domain_device *device = task->dev;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct hisi_sas_internal_abort_data *timeout = data;

        if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
                queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
                pr_err("Internal abort: timeout %016llx\n",
                       SAS_ADDR(device->sas_addr));
        } else {
                struct hisi_sas_slot *slot = task->lldd_task;

                set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);

                if (slot) {
                        struct hisi_sas_cq *cq =
                                &hisi_hba->cq[slot->dlvry_queue];
                        /*
                         * sync irq to avoid freeing task
                         * before using task in IO completion
                         */
                        synchronize_irq(cq->irq_no);
                        slot->task = NULL;
                }

                if (timeout->rst_ha_timeout) {
                        pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n",
                               SAS_ADDR(device->sas_addr));
                        queue_work(hisi_hba->wq, &hisi_hba->rst_work);
                } else {
                        pr_err("Internal abort: timeout and not done %016llx.\n",
                               SAS_ADDR(device->sas_addr));
                }

                return true;
        }

        return false;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
        hisi_sas_port_notify_formed(sas_phy);
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
                               u8 reg_index, u8 reg_count, u8 *write_data)
{
        struct hisi_hba *hisi_hba = sha->lldd_ha;

        if (!hisi_hba->hw->write_gpio)
                return -EOPNOTSUPP;

        return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
                                        reg_index, reg_count, write_data);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        struct sas_phy *sphy = sas_phy->phy;
        unsigned long flags;

        phy->phy_attached = 0;
        phy->phy_type = 0;
        phy->port = NULL;

        spin_lock_irqsave(&phy->lock, flags);
        if (phy->enable)
                sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
        else
                sphy->negotiated_linkrate = SAS_PHY_DISABLED;
        spin_unlock_irqrestore(&phy->lock, flags);
}

void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
                       gfp_t gfp_flags)
{
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        struct device *dev = hisi_hba->dev;

        if (rdy) {
                /* Phy down but ready */
                hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags);
                hisi_sas_port_notify_formed(sas_phy);
        } else {
                struct hisi_sas_port *port = phy->port;

                if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
                    phy->in_reset) {
                        dev_info(dev, "ignore flutter phy%d down\n", phy_no);
                        return;
                }
                /* Phy down and not ready */
                sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags);
                sas_phy_disconnected(sas_phy);

                if (port) {
                        if (phy->phy_type & PORT_TYPE_SAS) {
                                int port_id = port->id;

                                if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
                                                                       port_id))
                                        port->port_attached = 0;
                        } else if (phy->phy_type & PORT_TYPE_SATA)
                                port->port_attached = 0;
                }
                hisi_sas_phy_disconnected(phy);
        }
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
        int i;

        for (i = 0; i < hisi_hba->cq_nvecs; i++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[i];

                synchronize_irq(cq->irq_no);
        }
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);

int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
        struct hisi_hba *hisi_hba = shost_priv(shost);

        if (reset_type != SCSI_ADAPTER_RESET)
                return -EOPNOTSUPP;

        queue_work(hisi_hba->wq, &hisi_hba->rst_work);

        return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_host_reset);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct sas_domain_function_template hisi_sas_transport_ops = {
        .lldd_dev_found = hisi_sas_dev_found,
        .lldd_dev_gone = hisi_sas_dev_gone,
        .lldd_execute_task = hisi_sas_queue_command,
        .lldd_control_phy = hisi_sas_control_phy,
        .lldd_abort_task = hisi_sas_abort_task,
        .lldd_abort_task_set = hisi_sas_abort_task_set,
        .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
        .lldd_lu_reset = hisi_sas_lu_reset,
        .lldd_query_task = hisi_sas_query_task,
        .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
        .lldd_port_formed = hisi_sas_port_formed,
        .lldd_write_gpio = hisi_sas_write_gpio,
        .lldd_tmf_aborted = hisi_sas_tmf_aborted,
        .lldd_abort_timeout = hisi_sas_internal_abort_timeout,
};

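/*
 * Zero all HW-visible memories (command and completion queues, initial FIS
 * buffers, IOST and breakpoints) and reset the queue read/write pointers;
 * called at init time and again after controller reset.
 */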
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
        int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
        struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;

        for (i = 0; i < hisi_hba->queue_count; i++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[i];
                struct hisi_sas_dq *dq = &hisi_hba->dq[i];
                struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];

                s = sizeof(struct hisi_sas_cmd_hdr);
                for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
                        memset(&cmd_hdr[j], 0, s);

                dq->wr_point = 0;

                s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
                memset(hisi_hba->complete_hdr[i], 0, s);
                cq->rd_point = 0;
        }

        s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
        memset(hisi_hba->initial_fis, 0, s);

        s = max_command_entries * sizeof(struct hisi_sas_iost);
        memset(hisi_hba->iost, 0, s);

        s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
        memset(hisi_hba->breakpoint, 0, s);

        s = sizeof(struct hisi_sas_sata_breakpoint);
        for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
                memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

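/*
 * Allocate all per-HBA memories: delivery/completion queues, ITCT, IOST,
 * breakpoints, the IPTT bitmap and the slot buffers. Slot buffers are carved
 * out of page-aligned DMA blocks rather than one very large allocation.
 */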
int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
        struct device *dev = hisi_hba->dev;
        int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
        int max_command_entries_ru, sz_slot_buf_ru;
        int blk_cnt, slots_per_blk;

        sema_init(&hisi_hba->sem, 1);
        spin_lock_init(&hisi_hba->lock);
        for (i = 0; i < hisi_hba->n_phy; i++) {
                hisi_sas_phy_init(hisi_hba, i);
                hisi_hba->port[i].port_attached = 0;
                hisi_hba->port[i].id = -1;
        }

        for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
                hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
                hisi_hba->devices[i].device_id = i;
                hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
        }

        for (i = 0; i < hisi_hba->queue_count; i++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[i];
                struct hisi_sas_dq *dq = &hisi_hba->dq[i];

                /* Completion queue structure */
                cq->id = i;
                cq->hisi_hba = hisi_hba;

                /* Delivery queue structure */
                spin_lock_init(&dq->lock);
                INIT_LIST_HEAD(&dq->list);
                dq->id = i;
                dq->hisi_hba = hisi_hba;

                /* Delivery queue */
                s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
                hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
                                                &hisi_hba->cmd_hdr_dma[i],
                                                GFP_KERNEL);
                if (!hisi_hba->cmd_hdr[i])
                        goto err_out;

                /* Completion queue */
                s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
                hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
                                                &hisi_hba->complete_hdr_dma[i],
                                                GFP_KERNEL);
                if (!hisi_hba->complete_hdr[i])
                        goto err_out;
        }

        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
        hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
                                             GFP_KERNEL);
        if (!hisi_hba->itct)
                goto err_out;

        hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
                                           sizeof(struct hisi_sas_slot),
                                           GFP_KERNEL);
        if (!hisi_hba->slot_info)
                goto err_out;

        /* roundup to avoid overly large block size */
        max_command_entries_ru = roundup(max_command_entries, 64);
        if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
                sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
        else
                sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
        sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
        s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
        blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
        slots_per_blk = s / sz_slot_buf_ru;

        for (i = 0; i < blk_cnt; i++) {
                int slot_index = i * slots_per_blk;
                dma_addr_t buf_dma;
                void *buf;

                buf = dmam_alloc_coherent(dev, s, &buf_dma,
                                          GFP_KERNEL);
                if (!buf)
                        goto err_out;

                for (j = 0; j < slots_per_blk; j++, slot_index++) {
                        struct hisi_sas_slot *slot;

                        slot = &hisi_hba->slot_info[slot_index];
                        slot->buf = buf;
                        slot->buf_dma = buf_dma;
                        slot->idx = slot_index;

                        buf += sz_slot_buf_ru;
                        buf_dma += sz_slot_buf_ru;
                }
        }

        s = max_command_entries * sizeof(struct hisi_sas_iost);
        hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
                                             GFP_KERNEL);
        if (!hisi_hba->iost)
                goto err_out;

        s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
        hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
                                                   &hisi_hba->breakpoint_dma,
                                                   GFP_KERNEL);
        if (!hisi_hba->breakpoint)
                goto err_out;

        s = hisi_hba->slot_index_count = max_command_entries;
        hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
        if (!hisi_hba->slot_index_tags)
                goto err_out;

        s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
        hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
                                                    &hisi_hba->initial_fis_dma,
                                                    GFP_KERNEL);
        if (!hisi_hba->initial_fis)
                goto err_out;

        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
        hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
                                                &hisi_hba->sata_breakpoint_dma,
                                                GFP_KERNEL);
        if (!hisi_hba->sata_breakpoint)
                goto err_out;

        hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;

        hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
        if (!hisi_hba->wq) {
                dev_err(dev, "sas_alloc: failed to create workqueue\n");
                goto err_out;
        }

        return 0;
err_out:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

void hisi_sas_free(struct hisi_hba *hisi_hba)
{
        int i;

        for (i = 0; i < hisi_hba->n_phy; i++) {
                struct hisi_sas_phy *phy = &hisi_hba->phy[i];

                del_timer_sync(&phy->timer);
        }

        if (hisi_hba->wq)
                destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

void hisi_sas_rst_work_handler(struct work_struct *work)
{
        struct hisi_hba *hisi_hba =
                container_of(work, struct hisi_hba, rst_work);

        if (hisi_sas_controller_prereset(hisi_hba))
                return;

        hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
        struct hisi_sas_rst *rst =
                container_of(work, struct hisi_sas_rst, work);

        if (hisi_sas_controller_prereset(rst->hisi_hba))
                goto rst_complete;

        if (!hisi_sas_controller_reset(rst->hisi_hba))
                rst->done = true;
rst_complete:
        complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);

int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
        struct device *dev = hisi_hba->dev;
        struct platform_device *pdev = hisi_hba->platform_dev;
        struct device_node *np = pdev ? pdev->dev.of_node : NULL;
        struct clk *refclk;

        if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
                                          SAS_ADDR_SIZE)) {
                dev_err(dev, "could not get property sas-addr\n");
                return -ENOENT;
        }

        if (np) {
                /*
                 * These properties are only required for platform device-based
                 * controller with DT firmware.
                 */
                hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
                                        "hisilicon,sas-syscon");
                if (IS_ERR(hisi_hba->ctrl)) {
                        dev_err(dev, "could not get syscon\n");
                        return -ENOENT;
                }

                if (device_property_read_u32(dev, "ctrl-reset-reg",
                                             &hisi_hba->ctrl_reset_reg)) {
                        dev_err(dev, "could not get property ctrl-reset-reg\n");
                        return -ENOENT;
                }

                if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
                                             &hisi_hba->ctrl_reset_sts_reg)) {
                        dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
                        return -ENOENT;
                }

                if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
                                             &hisi_hba->ctrl_clock_ena_reg)) {
                        dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
                        return -ENOENT;
                }
        }

        refclk = devm_clk_get(dev, NULL);
        if (IS_ERR(refclk))
                dev_dbg(dev, "no ref clk property\n");
        else
                hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

        if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
                dev_err(dev, "could not get property phy-count\n");
                return -ENOENT;
        }

        if (device_property_read_u32(dev, "queue-count",
                                     &hisi_hba->queue_count)) {
                dev_err(dev, "could not get property queue-count\n");
                return -ENOENT;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
                                              const struct hisi_sas_hw *hw)
{
        struct resource *res;
        struct Scsi_Host *shost;
        struct hisi_hba *hisi_hba;
        struct device *dev = &pdev->dev;
        int error;

        shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
        if (!shost) {
                dev_err(dev, "scsi host alloc failed\n");
                return NULL;
        }
        hisi_hba = shost_priv(shost);

        INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
        hisi_hba->hw = hw;
        hisi_hba->dev = dev;
        hisi_hba->platform_dev = pdev;
        hisi_hba->shost = shost;
        SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

        timer_setup(&hisi_hba->timer, NULL, 0);

        if (hisi_sas_get_fw_info(hisi_hba) < 0)
                goto err_out;

        error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (error) {
                dev_err(dev, "No usable DMA addressing method\n");
                goto err_out;
        }

        hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hisi_hba->regs))
                goto err_out;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res) {
                hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
                if (IS_ERR(hisi_hba->sgpio_regs))
                        goto err_out;
        }

        if (hisi_sas_alloc(hisi_hba)) {
                hisi_sas_free(hisi_hba);
                goto err_out;
        }

        return shost;
err_out:
        scsi_host_put(shost);
        dev_err(dev, "shost alloc failed\n");
        return NULL;
}

static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
{
        if (hisi_hba->hw->interrupt_preinit)
                return hisi_hba->hw->interrupt_preinit(hisi_hba);
        return 0;
}

int hisi_sas_probe(struct platform_device *pdev,
                   const struct hisi_sas_hw *hw)
{
        struct Scsi_Host *shost;
        struct hisi_hba *hisi_hba;
        struct device *dev = &pdev->dev;
        struct asd_sas_phy **arr_phy;
        struct asd_sas_port **arr_port;
        struct sas_ha_struct *sha;
        int rc, phy_nr, port_nr, i;

        shost = hisi_sas_shost_alloc(pdev, hw);
        if (!shost)
                return -ENOMEM;

        sha = SHOST_TO_SAS_HA(shost);
        hisi_hba = shost_priv(shost);
        platform_set_drvdata(pdev, sha);

        phy_nr = port_nr = hisi_hba->n_phy;

        arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
        arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
        if (!arr_phy || !arr_port) {
                rc = -ENOMEM;
                goto err_out_ha;
        }

        sha->sas_phy = arr_phy;
        sha->sas_port = arr_port;
        sha->lldd_ha = hisi_hba;

        shost->transportt = hisi_sas_stt;
        shost->max_id = HISI_SAS_MAX_DEVICES;
        shost->max_lun = ~0;
        shost->max_channel = 1;
        shost->max_cmd_len = 16;
        if (hisi_hba->hw->slot_index_alloc) {
                shost->can_queue = HISI_SAS_MAX_COMMANDS;
                shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
        } else {
                shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
                shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
        }

        sha->sas_ha_name = DRV_NAME;
        sha->dev = hisi_hba->dev;
        sha->lldd_module = THIS_MODULE;
        sha->sas_addr = &hisi_hba->sas_addr[0];
        sha->num_phys = hisi_hba->n_phy;
        sha->core.shost = hisi_hba->shost;

        for (i = 0; i < hisi_hba->n_phy; i++) {
                sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
                sha->sas_port[i] = &hisi_hba->port[i].sas_port;
        }

        rc = hisi_sas_interrupt_preinit(hisi_hba);
        if (rc)
                goto err_out_ha;

        rc = scsi_add_host(shost, &pdev->dev);
        if (rc)
                goto err_out_ha;

        rc = sas_register_ha(sha);
        if (rc)
                goto err_out_register_ha;

        rc = hisi_hba->hw->hw_init(hisi_hba);
        if (rc)
                goto err_out_hw_init;

        scsi_scan_host(shost);

        return 0;

err_out_hw_init:
        sas_unregister_ha(sha);
err_out_register_ha:
        scsi_remove_host(shost);
err_out_ha:
        hisi_sas_free(hisi_hba);
        scsi_host_put(shost);
        return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

int hisi_sas_remove(struct platform_device *pdev)
{
        struct sas_ha_struct *sha = platform_get_drvdata(pdev);
        struct hisi_hba *hisi_hba = sha->lldd_ha;
        struct Scsi_Host *shost = sha->core.shost;

        del_timer_sync(&hisi_hba->timer);

        sas_unregister_ha(sha);
        sas_remove_host(sha->core.shost);

        hisi_sas_free(hisi_hba);
        scsi_host_put(shost);
        return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

#if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE)
#define DEBUGFS_ENABLE_DEFAULT "enabled"
bool hisi_sas_debugfs_enable = true;
u32 hisi_sas_debugfs_dump_count = 50;
#else
#define DEBUGFS_ENABLE_DEFAULT "disabled"
bool hisi_sas_debugfs_enable;
u32 hisi_sas_debugfs_dump_count = 1;
#endif

EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable,
                 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")");

EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");

struct dentry *hisi_sas_debugfs_dir;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir);

static __init int hisi_sas_init(void)
{
        hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
        if (!hisi_sas_stt)
                return -ENOMEM;

        if (hisi_sas_debugfs_enable) {
                hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
                if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
                        pr_info("hisi_sas: Limiting debugfs dump count\n");
                        hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
                }
        }

        return 0;
}

static __exit void hisi_sas_exit(void)
{
        sas_release_transport(hisi_sas_stt);

        debugfs_remove(hisi_sas_debugfs_dir);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);