1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Driver for Broadcom MPI3 Storage Controllers
5 * Copyright (C) 2017-2023 Broadcom Inc.
6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
11 #include <linux/bsg-lib.h>
12 #include <uapi/scsi/scsi_bsg_mpi3mr.h>
15 * mpi3mr_bsg_pel_abort - sends PEL abort request
16 * @mrioc: Adapter instance reference
18 * This function sends a PEL abort request to the firmware through
19 * the admin request queue.
21 * Return: 0 on success, -1 on failure
23 static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc)
25 struct mpi3_pel_req_action_abort pel_abort_req;
26 struct mpi3_pel_reply *pel_reply;
30 if (mrioc->reset_in_progress) {
31 dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
34 if (mrioc->stop_bsgs) {
35 dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
39 memset(&pel_abort_req, 0, sizeof(pel_abort_req));
40 mutex_lock(&mrioc->pel_abort_cmd.mutex);
41 if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) {
42 dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
43 mutex_unlock(&mrioc->pel_abort_cmd.mutex);
46 mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING;
47 mrioc->pel_abort_cmd.is_waiting = 1;
48 mrioc->pel_abort_cmd.callback = NULL;
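/*
 * The abort request is tracked with the driver's dedicated PEL abort
 * host tag and targets the outstanding PEL wait request, which is
 * identified by its own reserved host tag.
 */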
49 pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT);
50 pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
51 pel_abort_req.action = MPI3_PEL_ACTION_ABORT;
52 pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
54 mrioc->pel_abort_requested = 1;
55 init_completion(&mrioc->pel_abort_cmd.done);
56 retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req,
57 sizeof(pel_abort_req), 0);
60 dprint_bsg_err(mrioc, "%s: admin request post failed\n",
62 mrioc->pel_abort_requested = 0;
66 wait_for_completion_timeout(&mrioc->pel_abort_cmd.done,
67 (MPI3MR_INTADMCMD_TIMEOUT * HZ));
68 if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) {
69 mrioc->pel_abort_cmd.is_waiting = 0;
70 dprint_bsg_err(mrioc, "%s: command timed out\n", __func__);
71 if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET))
72 mpi3mr_soft_reset_handler(mrioc,
73 MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1);
77 if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
78 != MPI3_IOCSTATUS_SUCCESS) {
80 "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
81 __func__, (mrioc->pel_abort_cmd.ioc_status &
82 MPI3_IOCSTATUS_STATUS_MASK),
83 mrioc->pel_abort_cmd.ioc_loginfo);
87 if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) {
88 pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply;
89 pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
90 if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) {
92 "%s: command failed, pel_status(0x%04x)\n",
93 __func__, pe_log_status);
99 mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
100 mutex_unlock(&mrioc->pel_abort_cmd.mutex);
104 * mpi3mr_bsg_verify_adapter - verify adapter number is valid
105 * @ioc_number: Adapter number
107 * This function returns the adapter instance pointer for the given
108 * adapter number. If the adapter number does not match any adapter
109 * in the driver's adapter list, NULL is returned.
111 * Return: adapter instance reference
113 static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number)
115 struct mpi3mr_ioc *mrioc = NULL;
117 spin_lock(&mrioc_list_lock);
118 list_for_each_entry(mrioc, &mrioc_list, list) {
119 if (mrioc->id == ioc_number) {
120 spin_unlock(&mrioc_list_lock);
124 spin_unlock(&mrioc_list_lock);
129 * mpi3mr_enable_logdata - Handler for log data enable
130 * @mrioc: Adapter instance reference
131 * @job: BSG job reference
133 * This function enables log data caching in the driver if not
134 * already enabled and returns the maximum number of log data
135 * entries that can be cached in the driver.
137 * Return: 0 on success and proper error codes on failure
139 static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc,
142 struct mpi3mr_logdata_enable logdata_enable;
144 if (!mrioc->logdata_buf) {
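/*
 * Size each cached entry to hold the event data portion of a full
 * event notification reply (reply size minus the fixed reply header)
 * plus the driver's per-entry BSG log data header.
 */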
145 mrioc->logdata_entry_sz =
146 (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4))
147 + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ;
148 mrioc->logdata_buf_idx = 0;
149 mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES,
150 mrioc->logdata_entry_sz, GFP_KERNEL);
152 if (!mrioc->logdata_buf)
156 memset(&logdata_enable, 0, sizeof(logdata_enable));
157 logdata_enable.max_entries =
158 MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
159 if (job->request_payload.payload_len >= sizeof(logdata_enable)) {
160 sg_copy_from_buffer(job->request_payload.sg_list,
161 job->request_payload.sg_cnt,
162 &logdata_enable, sizeof(logdata_enable));
169 * mpi3mr_get_logdata - Handler for get log data
170 * @mrioc: Adapter instance reference
171 * @job: BSG job pointer
172 * This function copies the log data entries to the user buffer
173 * when log caching is enabled in the driver.
175 * Return: 0 on success and proper error codes on failure
177 static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc,
180 u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz;
182 if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz))
185 num_entries = job->request_payload.payload_len / entry_sz;
186 if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES)
187 num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
188 sz = num_entries * entry_sz;
190 if (job->request_payload.payload_len >= sz) {
191 sg_copy_from_buffer(job->request_payload.sg_list,
192 job->request_payload.sg_cnt,
193 mrioc->logdata_buf, sz);
200 * mpi3mr_bsg_pel_enable - Handler for PEL enable driver
201 * @mrioc: Adapter instance reference
202 * @job: BSG job pointer
204 * This function is the handler for the PEL enable driver command.
205 * It validates the application given class and locale and, if
206 * required, aborts the existing PEL wait request and/or issues a
207 * new PEL wait request to the firmware and returns.
209 * Return: 0 on success and proper error codes on failure.
211 static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
215 struct mpi3mr_bsg_out_pel_enable pel_enable;
220 if (job->request_payload.payload_len != sizeof(pel_enable)) {
221 dprint_bsg_err(mrioc, "%s: invalid size argument\n",
226 if (mrioc->unrecoverable) {
227 dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
232 if (mrioc->reset_in_progress) {
233 dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
237 if (mrioc->stop_bsgs) {
238 dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
242 sg_copy_to_buffer(job->request_payload.sg_list,
243 job->request_payload.sg_cnt,
244 &pel_enable, sizeof(pel_enable));
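/*
 * If PEL is already enabled and the currently armed class/locale
 * already covers the application's request, nothing needs to be
 * re-issued. Otherwise the request is merged with the existing
 * settings (locales are OR'ed, the lower class value is kept), the
 * outstanding PEL wait is aborted and a new one is posted below.
 */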
246 if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) {
247 dprint_bsg_err(mrioc, "%s: out of range class %d sent\n",
248 __func__, pel_enable.pel_class);
252 if (!mrioc->pel_enabled)
255 if ((mrioc->pel_class <= pel_enable.pel_class) &&
256 !((mrioc->pel_locale & pel_enable.pel_locale) ^
257 pel_enable.pel_locale)) {
261 pel_enable.pel_locale |= mrioc->pel_locale;
263 if (mrioc->pel_class < pel_enable.pel_class)
264 pel_enable.pel_class = mrioc->pel_class;
266 rval = mpi3mr_bsg_pel_abort(mrioc);
268 dprint_bsg_err(mrioc,
269 "%s: pel_abort failed, status(%ld)\n",
276 if (issue_pel_wait) {
277 tmp_class = mrioc->pel_class;
278 tmp_locale = mrioc->pel_locale;
279 mrioc->pel_class = pel_enable.pel_class;
280 mrioc->pel_locale = pel_enable.pel_locale;
281 mrioc->pel_enabled = 1;
282 rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL);
284 mrioc->pel_class = tmp_class;
285 mrioc->pel_locale = tmp_locale;
286 mrioc->pel_enabled = 0;
287 dprint_bsg_err(mrioc,
288 "%s: pel get sequence number failed, status(%ld)\n",
297 * mpi3mr_get_all_tgt_info - Get all target information
298 * @mrioc: Adapter instance reference
299 * @job: BSG job reference
301 * This function copies the device handle, persistent ID, bus ID
302 * and target ID of the driver managed target devices to the user
303 * provided buffer for the specific controller. This function
304 * also provides the number of devices managed by the driver for
305 * the specific controller.
307 * Return: 0 on success and proper error codes on failure
309 static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
312 u16 num_devices = 0, i = 0, size;
314 struct mpi3mr_tgt_dev *tgtdev;
315 struct mpi3mr_device_map_info *devmap_info = NULL;
316 struct mpi3mr_all_tgt_info *alltgt_info = NULL;
317 uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0;
319 if (job->request_payload.payload_len < sizeof(u32)) {
320 dprint_bsg_err(mrioc, "%s: invalid size argument\n",
325 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
326 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
328 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
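/*
 * The user buffer starts with the device count (the first
 * sizeof(u64) bytes of struct mpi3mr_all_tgt_info) followed by one
 * device map entry per target. If the buffer only has room for the
 * count, or no devices are present, return just the count.
 */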
330 if ((job->request_payload.payload_len <= sizeof(u64)) ||
331 list_empty(&mrioc->tgtdev_list)) {
332 sg_copy_from_buffer(job->request_payload.sg_list,
333 job->request_payload.sg_cnt,
334 &num_devices, sizeof(num_devices));
338 kern_entrylen = num_devices * sizeof(*devmap_info);
339 size = sizeof(u64) + kern_entrylen;
340 alltgt_info = kzalloc(size, GFP_KERNEL);
344 devmap_info = alltgt_info->dmi;
345 memset((u8 *)devmap_info, 0xFF, kern_entrylen);
346 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
347 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
348 if (i < num_devices) {
349 devmap_info[i].handle = tgtdev->dev_handle;
350 devmap_info[i].perst_id = tgtdev->perst_id;
351 if (tgtdev->host_exposed && tgtdev->starget) {
352 devmap_info[i].target_id = tgtdev->starget->id;
353 devmap_info[i].bus_id =
354 tgtdev->starget->channel;
360 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
362 alltgt_info->num_devices = num_devices;
364 usr_entrylen = (job->request_payload.payload_len - sizeof(u64)) /
365 sizeof(*devmap_info);
366 usr_entrylen *= sizeof(*devmap_info);
367 min_entrylen = min(usr_entrylen, kern_entrylen);
369 sg_copy_from_buffer(job->request_payload.sg_list,
370 job->request_payload.sg_cnt,
371 alltgt_info, (min_entrylen + sizeof(u64)));
376 * mpi3mr_get_change_count - Get topology change count
377 * @mrioc: Adapter instance reference
378 * @job: BSG job reference
380 * This function copies the topology change count provided by the
381 * driver in events and cached in the driver to the user
382 * provided buffer for the specific controller.
384 * Return: 0 on success and proper error codes on failure
386 static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc,
389 struct mpi3mr_change_count chgcnt;
391 memset(&chgcnt, 0, sizeof(chgcnt));
392 chgcnt.change_count = mrioc->change_count;
393 if (job->request_payload.payload_len >= sizeof(chgcnt)) {
394 sg_copy_from_buffer(job->request_payload.sg_list,
395 job->request_payload.sg_cnt,
396 &chgcnt, sizeof(chgcnt));
403 * mpi3mr_bsg_adp_reset - Issue controller reset
404 * @mrioc: Adapter instance reference
405 * @job: BSG job reference
407 * This function identifies the user provided reset type and
408 * issues the appropriate reset to the controller, waits for it
409 * to complete, reinitializes the controller and then returns.
411 * Return: 0 on success and proper error codes on failure
413 static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc,
418 struct mpi3mr_bsg_adp_reset adpreset;
420 if (job->request_payload.payload_len !=
422 dprint_bsg_err(mrioc, "%s: invalid size argument\n",
427 sg_copy_to_buffer(job->request_payload.sg_list,
428 job->request_payload.sg_cnt,
429 &adpreset, sizeof(adpreset));
431 switch (adpreset.reset_type) {
432 case MPI3MR_BSG_ADPRESET_SOFT:
435 case MPI3MR_BSG_ADPRESET_DIAG_FAULT:
439 dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n",
440 __func__, adpreset.reset_type);
444 rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP,
448 dprint_bsg_err(mrioc,
449 "%s: reset handler returned error(%ld) for reset type %d\n",
450 __func__, rval, adpreset.reset_type);
456 * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler
457 * @mrioc: Adapter instance reference
458 * @job: BSG job reference
460 * This function provides adapter information for the given
463 * Return: 0 on success and proper error codes on failure
465 static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc,
468 enum mpi3mr_iocstate ioc_state;
469 struct mpi3mr_bsg_in_adpinfo adpinfo;
471 memset(&adpinfo, 0, sizeof(adpinfo));
472 adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY;
473 adpinfo.pci_dev_id = mrioc->pdev->device;
474 adpinfo.pci_dev_hw_rev = mrioc->pdev->revision;
475 adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device;
476 adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor;
477 adpinfo.pci_bus = mrioc->pdev->bus->number;
478 adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn);
479 adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn);
480 adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus);
481 adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION;
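/*
 * Map the internal IOC state onto the BSG adapter states reported to
 * the application; an in-progress reset or blocked bsgs is reported
 * as in-reset.
 */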
483 ioc_state = mpi3mr_get_iocstate(mrioc);
484 if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
485 adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
486 else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
487 adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
488 else if (ioc_state == MRIOC_STATE_FAULT)
489 adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
491 adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
493 memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info,
494 sizeof(adpinfo.driver_info));
496 if (job->request_payload.payload_len >= sizeof(adpinfo)) {
497 sg_copy_from_buffer(job->request_payload.sg_list,
498 job->request_payload.sg_cnt,
499 &adpinfo, sizeof(adpinfo));
506 * mpi3mr_bsg_process_drv_cmds - Driver Command handler
507 * @job: BSG job reference
509 * This function is the top level handler for driver commands,
510 * this does basic validation of the buffer and identifies the
511 * opcode and switches to correct sub handler.
513 * Return: 0 on success and proper error codes on failure
515 static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
518 struct mpi3mr_ioc *mrioc = NULL;
519 struct mpi3mr_bsg_packet *bsg_req = NULL;
520 struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL;
522 bsg_req = job->request;
523 drvrcmd = &bsg_req->cmd.drvrcmd;
525 mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id);
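/*
 * The adapter info opcode is serviced without taking the bsg_cmds
 * mutex; all remaining driver command opcodes below are serialized
 * through that mutex.
 */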
529 if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) {
530 rval = mpi3mr_bsg_populate_adpinfo(mrioc, job);
534 if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
537 switch (drvrcmd->opcode) {
538 case MPI3MR_DRVBSG_OPCODE_ADPRESET:
539 rval = mpi3mr_bsg_adp_reset(mrioc, job);
541 case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO:
542 rval = mpi3mr_get_all_tgt_info(mrioc, job);
544 case MPI3MR_DRVBSG_OPCODE_GETCHGCNT:
545 rval = mpi3mr_get_change_count(mrioc, job);
547 case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE:
548 rval = mpi3mr_enable_logdata(mrioc, job);
550 case MPI3MR_DRVBSG_OPCODE_GETLOGDATA:
551 rval = mpi3mr_get_logdata(mrioc, job);
553 case MPI3MR_DRVBSG_OPCODE_PELENABLE:
554 rval = mpi3mr_bsg_pel_enable(mrioc, job);
556 case MPI3MR_DRVBSG_OPCODE_UNKNOWN:
558 pr_err("%s: unsupported driver command opcode %d\n",
559 MPI3MR_DRIVER_NAME, drvrcmd->opcode);
562 mutex_unlock(&mrioc->bsg_cmds.mutex);
567 * mpi3mr_total_num_ioctl_sges - Count number of SGEs required
568 * @drv_bufs: DMA address of the buffers to be placed in sgl
569 * @bufcnt: Number of DMA buffers
571 * This function returns the total number of data SGEs required,
572 * including zero length SGEs and excluding the management request
573 * and response buffers, for the given list of data buffer
576 * Return: Number of SGE elements needed
578 static inline u16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_buf_map *drv_bufs,
581 u16 i, sge_count = 0;
583 for (i = 0; i < bufcnt; i++, drv_bufs++) {
584 if (drv_bufs->data_dir == DMA_NONE ||
587 sge_count += drv_bufs->num_dma_desc;
588 if (!drv_bufs->num_dma_desc)
595 * mpi3mr_bsg_build_sgl - SGL construction for MPI commands
596 * @mrioc: Adapter instance reference
597 * @mpi_req: MPI request
598 * @sgl_offset: offset to start sgl in the MPI request
599 * @drv_bufs: DMA address of the buffers to be placed in sgl
600 * @bufcnt: Number of DMA buffers
601 * @is_rmc: Does the buffer list have a management command buffer
602 * @is_rmr: Does the buffer list have a management response buffer
603 * @num_datasges: Number of data buffers in the list
605 * This function places the DMA address of the given buffers in
606 * proper format as SGEs in the given MPI request.
608 * Return: 0 on success, -1 on failure
610 static int mpi3mr_bsg_build_sgl(struct mpi3mr_ioc *mrioc, u8 *mpi_req,
611 u32 sgl_offset, struct mpi3mr_buf_map *drv_bufs,
612 u8 bufcnt, u8 is_rmc, u8 is_rmr, u8 num_datasges)
614 struct mpi3_request_header *mpi_header =
615 (struct mpi3_request_header *)mpi_req;
616 u8 *sgl = (mpi_req + sgl_offset), count = 0;
617 struct mpi3_mgmt_passthrough_request *rmgmt_req =
618 (struct mpi3_mgmt_passthrough_request *)mpi_req;
619 struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
620 u8 flag, sgl_flags, sgl_flag_eob, sgl_flags_last, last_chain_sgl_flag;
621 u16 available_sges, i, sges_needed;
622 u32 sge_element_size = sizeof(struct mpi3_sge_common);
623 bool chain_used = false;
625 sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
626 MPI3_SGE_FLAGS_DLAS_SYSTEM;
627 sgl_flag_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER;
628 sgl_flags_last = sgl_flag_eob | MPI3_SGE_FLAGS_END_OF_LIST;
629 last_chain_sgl_flag = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
630 MPI3_SGE_FLAGS_DLAS_SYSTEM;
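/*
 * Management command/response buffers get their own SGLs in the
 * request. Data buffers are described by simple SGEs placed either
 * in the unused tail of the management command kernel buffer or
 * inline in the admin request frame after sgl_offset; when they do
 * not all fit, the last inline element becomes a chain SGE pointing
 * at the pre-allocated ioctl chain buffer.
 */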
632 sges_needed = mpi3mr_total_num_ioctl_sges(drv_bufs, bufcnt);
635 mpi3mr_add_sg_single(&rmgmt_req->command_sgl,
636 sgl_flags_last, drv_buf_iter->kern_buf_len,
637 drv_buf_iter->kern_buf_dma);
638 sgl = (u8 *)drv_buf_iter->kern_buf +
639 drv_buf_iter->bsg_buf_len;
640 available_sges = (drv_buf_iter->kern_buf_len -
641 drv_buf_iter->bsg_buf_len) / sge_element_size;
643 if (sges_needed > available_sges)
650 mpi3mr_add_sg_single(&rmgmt_req->response_sgl,
651 sgl_flags_last, drv_buf_iter->kern_buf_len,
652 drv_buf_iter->kern_buf_dma);
656 mpi3mr_build_zero_len_sge(
657 &rmgmt_req->response_sgl);
663 if (sgl_offset >= MPI3MR_ADMIN_REQ_FRAME_SZ)
665 available_sges = (MPI3MR_ADMIN_REQ_FRAME_SZ - sgl_offset) /
671 mpi3mr_build_zero_len_sge(sgl);
674 if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
675 if ((sges_needed > 2) || (sges_needed > available_sges))
677 for (; count < bufcnt; count++, drv_buf_iter++) {
678 if (drv_buf_iter->data_dir == DMA_NONE ||
679 !drv_buf_iter->num_dma_desc)
681 mpi3mr_add_sg_single(sgl, sgl_flags_last,
682 drv_buf_iter->dma_desc[0].size,
683 drv_buf_iter->dma_desc[0].dma_addr);
684 sgl += sge_element_size;
691 for (; count < bufcnt; count++, drv_buf_iter++) {
692 if (drv_buf_iter->data_dir == DMA_NONE)
694 if (!drv_buf_iter->num_dma_desc) {
695 if (chain_used && !available_sges)
697 if (!chain_used && (available_sges == 1) &&
701 if (num_datasges == 1)
702 flag = sgl_flags_last;
703 mpi3mr_add_sg_single(sgl, flag, 0, 0);
704 sgl += sge_element_size;
710 for (; i < drv_buf_iter->num_dma_desc; i++) {
711 if (chain_used && !available_sges)
713 if (!chain_used && (available_sges == 1) &&
717 if (i == (drv_buf_iter->num_dma_desc - 1)) {
718 if (num_datasges == 1)
719 flag = sgl_flags_last;
724 mpi3mr_add_sg_single(sgl, flag,
725 drv_buf_iter->dma_desc[i].size,
726 drv_buf_iter->dma_desc[i].dma_addr);
727 sgl += sge_element_size;
737 available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
738 if (sges_needed > available_sges)
740 mpi3mr_add_sg_single(sgl, last_chain_sgl_flag,
741 (sges_needed * sge_element_size),
742 mrioc->ioctl_chain_sge.dma_addr);
743 memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
744 sgl = (u8 *)mrioc->ioctl_chain_sge.addr;
750 * mpi3mr_get_nvme_data_fmt - returns the NVMe data format
751 * @nvme_encap_request: NVMe encapsulated MPI request
753 * This function returns the type of the data format specified
754 * in the user provided NVMe command within the NVMe encapsulated request.
756 * Return: Data format of the NVMe command (PRP/SGL etc)
758 static unsigned int mpi3mr_get_nvme_data_fmt(
759 struct mpi3_nvme_encapsulated_request *nvme_encap_request)
763 format = ((nvme_encap_request->command[0] & 0xc000) >> 14);
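/*
 * command[0] is NVMe command dword 0; bits 15:14 form the PSDT
 * field, which selects whether the command transfers data through
 * PRPs or SGLs.
 */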
769 * mpi3mr_build_nvme_sgl - SGL constructor for NVME
770 * encapsulated request
771 * @mrioc: Adapter instance reference
772 * @nvme_encap_request: NVMe encapsulated MPI request
773 * @drv_bufs: DMA address of the buffers to be placed in sgl
774 * @bufcnt: Number of DMA buffers
776 * This function places the DMA address of the given buffers in
777 * proper format as SGEs in the given NVMe encapsulated request.
779 * Return: 0 on success, -1 on failure
781 static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
782 struct mpi3_nvme_encapsulated_request *nvme_encap_request,
783 struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
785 struct mpi3mr_nvme_pt_sge *nvme_sgl;
789 u16 available_sges = 0, i;
790 u32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge);
791 struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
792 u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
793 mrioc->facts.sge_mod_shift) << 32);
794 u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
795 mrioc->facts.sge_mod_shift) << 32;
798 nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
799 ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);
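/*
 * When the data buffer maps onto a single DMA descriptor, the data
 * SGL is built directly in the command. Otherwise the command SGL
 * becomes a last-segment descriptor pointing at a segment list built
 * in the pre-allocated ioctl chain buffer, with one data segment per
 * DMA descriptor.
 */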
802 * Not all commands require a data transfer. If no data, just return
803 * without constructing any sgl.
805 for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
806 if (drv_buf_iter->data_dir == DMA_NONE)
808 length = drv_buf_iter->kern_buf_len;
811 if (!length || !drv_buf_iter->num_dma_desc)
814 if (drv_buf_iter->num_dma_desc == 1) {
819 sgl_dma = cpu_to_le64(mrioc->ioctl_chain_sge.dma_addr);
820 if (sgl_dma & sgemod_mask) {
821 dprint_bsg_err(mrioc,
822 "%s: SGL chain address collides with SGE modifier\n",
827 sgl_dma &= ~sgemod_mask;
828 sgl_dma |= sgemod_val;
830 memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
831 available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
832 if (available_sges < drv_buf_iter->num_dma_desc)
834 memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
835 nvme_sgl->base_addr = sgl_dma;
836 size = drv_buf_iter->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge);
837 nvme_sgl->length = cpu_to_le32(size);
838 nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT;
839 nvme_sgl = (struct mpi3mr_nvme_pt_sge *)mrioc->ioctl_chain_sge.addr;
842 for (i = 0; i < drv_buf_iter->num_dma_desc; i++) {
843 sgl_dma = cpu_to_le64(drv_buf_iter->dma_desc[i].dma_addr);
844 if (sgl_dma & sgemod_mask) {
845 dprint_bsg_err(mrioc,
846 "%s: SGL address collides with SGE modifier\n",
851 sgl_dma &= ~sgemod_mask;
852 sgl_dma |= sgemod_val;
854 nvme_sgl->base_addr = sgl_dma;
855 nvme_sgl->length = cpu_to_le32(drv_buf_iter->dma_desc[i].size);
856 nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT;
865 * mpi3mr_build_nvme_prp - PRP constructor for NVME
866 * encapsulated request
867 * @mrioc: Adapter instance reference
868 * @nvme_encap_request: NVMe encapsulated MPI request
869 * @drv_bufs: DMA address of the buffers to be placed in SGL
870 * @bufcnt: Number of DMA buffers
872 * This function places the DMA address of the given buffers in
873 * proper format as PRP entries in the given NVMe encapsulated
876 * Return: 0 on success, -1 on failure
878 static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
879 struct mpi3_nvme_encapsulated_request *nvme_encap_request,
880 struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
882 int prp_size = MPI3MR_NVME_PRP_SIZE;
883 __le64 *prp_entry, *prp1_entry, *prp2_entry;
885 dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
886 u32 offset, entry_len, dev_pgsz;
887 u32 page_mask_result, page_mask;
888 size_t length = 0, desc_len;
890 struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
891 u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
892 mrioc->facts.sge_mod_shift) << 32);
893 u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
894 mrioc->facts.sge_mod_shift) << 32;
895 u16 dev_handle = nvme_encap_request->dev_handle;
896 struct mpi3mr_tgt_dev *tgtdev;
899 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
901 dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n",
902 __func__, dev_handle);
906 if (tgtdev->dev_spec.pcie_inf.pgsz == 0) {
907 dprint_bsg_err(mrioc,
908 "%s: NVMe device page size is zero for handle 0x%04x\n",
909 __func__, dev_handle);
910 mpi3mr_tgtdev_put(tgtdev);
914 dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz);
915 mpi3mr_tgtdev_put(tgtdev);
916 page_mask = dev_pgsz - 1;
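/*
 * The NVMe device page size is 2^pgsz as reported for the PCIe
 * device. PRPs can only be built when the ioctl SGE size is a
 * multiple of that page size (and not smaller than it) and every DMA
 * descriptor is page aligned.
 */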
918 if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE) {
919 dprint_bsg_err(mrioc,
920 "%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n",
921 __func__, dev_pgsz, MPI3MR_IOCTL_SGE_SIZE, dev_handle);
925 if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz) {
926 dprint_bsg_err(mrioc,
927 "%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n",
928 __func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle);
933 * Not all commands require a data transfer. If no data, just return
934 * without constructing any PRP.
936 for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
937 if (drv_buf_iter->data_dir == DMA_NONE)
939 length = drv_buf_iter->kern_buf_len;
943 if (!length || !drv_buf_iter->num_dma_desc)
946 for (count = 0; count < drv_buf_iter->num_dma_desc; count++) {
947 dma_addr = drv_buf_iter->dma_desc[count].dma_addr;
948 if (dma_addr & page_mask) {
949 dprint_bsg_err(mrioc,
950 "%s:dma_addr %pad is not aligned with page size 0x%x\n",
951 __func__, &dma_addr, dev_pgsz);
956 dma_addr = drv_buf_iter->dma_desc[0].dma_addr;
957 desc_len = drv_buf_iter->dma_desc[0].size;
960 mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev,
961 dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL);
963 if (!mrioc->prp_list_virt)
965 mrioc->prp_sz = dev_pgsz;
968 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
969 * PRP1 is located at a 24 byte offset from the start of the NVMe
970 * command. Then set the current PRP entry pointer to PRP1.
972 prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
973 MPI3MR_NVME_CMD_PRP1_OFFSET);
974 prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
975 MPI3MR_NVME_CMD_PRP2_OFFSET);
976 prp_entry = prp1_entry;
978 * For the PRP entries, use the specially allocated buffer of
981 prp_page = (__le64 *)mrioc->prp_list_virt;
982 prp_page_dma = mrioc->prp_list_dma;
985 * Check if we are within 1 entry of a page boundary; we don't
986 * want our first entry to be a PRP List entry.
988 page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
989 if (!page_mask_result) {
990 dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n",
996 * Set PRP physical pointer, which initially points to the current PRP
999 prp_entry_dma = prp_page_dma;
1002 /* Loop while the length is not zero. */
1004 page_mask_result = (prp_entry_dma + prp_size) & page_mask;
1005 if (!page_mask_result && (length > dev_pgsz)) {
1006 dprint_bsg_err(mrioc,
1007 "%s: single PRP page is not sufficient\n",
1012 /* Need to handle if entry will be part of a page. */
1013 offset = dma_addr & page_mask;
1014 entry_len = dev_pgsz - offset;
1016 if (prp_entry == prp1_entry) {
1018 * Must fill in the first PRP pointer (PRP1) before
1021 *prp1_entry = cpu_to_le64(dma_addr);
1022 if (*prp1_entry & sgemod_mask) {
1023 dprint_bsg_err(mrioc,
1024 "%s: PRP1 address collides with SGE modifier\n",
1028 *prp1_entry &= ~sgemod_mask;
1029 *prp1_entry |= sgemod_val;
1032 * Now point to the second PRP entry within the
1035 prp_entry = prp2_entry;
1036 } else if (prp_entry == prp2_entry) {
1038 * Should the PRP2 entry be a PRP List pointer or just
1039 * a regular PRP pointer? If there is more than one
1040 * more page of data, must use a PRP List pointer.
1042 if (length > dev_pgsz) {
1044 * PRP2 will contain a PRP List pointer because
1045 * more PRP's are needed with this command. The
1046 * list will start at the beginning of the
1047 * contiguous buffer.
1049 *prp2_entry = cpu_to_le64(prp_entry_dma);
1050 if (*prp2_entry & sgemod_mask) {
1051 dprint_bsg_err(mrioc,
1052 "%s: PRP list address collides with SGE modifier\n",
1056 *prp2_entry &= ~sgemod_mask;
1057 *prp2_entry |= sgemod_val;
1060 * The next PRP Entry will be the start of the
1063 prp_entry = prp_page;
1067 * After this, the PRP Entries are complete.
1068 * This command uses 2 PRP's and no PRP list.
1070 *prp2_entry = cpu_to_le64(dma_addr);
1071 if (*prp2_entry & sgemod_mask) {
1072 dprint_bsg_err(mrioc,
1073 "%s: PRP2 collides with SGE modifier\n",
1077 *prp2_entry &= ~sgemod_mask;
1078 *prp2_entry |= sgemod_val;
1082 * Put entry in list and bump the addresses.
1084 * After PRP1 and PRP2 are filled in, this will fill in
1085 * all remaining PRP entries in a PRP List, one entry
1086 * each time through the loop.
1088 *prp_entry = cpu_to_le64(dma_addr);
1089 if (*prp_entry & sgemod_mask) {
1090 dprint_bsg_err(mrioc,
1091 "%s: PRP address collides with SGE modifier\n",
1095 *prp_entry &= ~sgemod_mask;
1096 *prp_entry |= sgemod_val;
1098 prp_entry_dma += prp_size;
1101 /* decrement length accounting for last partial page. */
1102 if (entry_len >= length) {
1105 if (entry_len <= desc_len) {
1106 dma_addr += entry_len;
1107 desc_len -= entry_len;
1110 if ((++desc_count) >=
1111 drv_buf_iter->num_dma_desc) {
1112 dprint_bsg_err(mrioc,
1113 "%s: Invalid len %zd while building PRP\n",
1118 drv_buf_iter->dma_desc[desc_count].dma_addr;
1120 drv_buf_iter->dma_desc[desc_count].size;
1122 length -= entry_len;
1128 if (mrioc->prp_list_virt) {
1129 dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
1130 mrioc->prp_list_virt, mrioc->prp_list_dma);
1131 mrioc->prp_list_virt = NULL;
1137 * mpi3mr_map_data_buffer_dma - build dma descriptors for data
1139 * @mrioc: Adapter instance reference
1140 * @drv_buf: buffer map descriptor
1141 * @desc_count: Number of already consumed dma descriptors
1143 * This function computes how many pre-allocated DMA descriptors
1144 * are required for the given data buffer and, if that many
1145 * descriptors are free, sets up the mapping of the scattered
1146 * DMA addresses to the given data buffer. If the data direction
1147 * of the buffer is DMA_TO_DEVICE, the actual data is copied to
1150 * Return: 0 on success, -1 on failure
1152 static int mpi3mr_map_data_buffer_dma(struct mpi3mr_ioc *mrioc,
1153 struct mpi3mr_buf_map *drv_buf,
1156 u16 i, needed_desc = drv_buf->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE;
1157 u32 buf_len = drv_buf->kern_buf_len, copied_len = 0;
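/*
 * Split the buffer across the pre-allocated ioctl SGE segments:
 * compute how many MPI3MR_IOCTL_SGE_SIZE sized descriptors are
 * needed (rounding up), fail if that exceeds the remaining pool, and
 * for DMA_TO_DEVICE buffers copy the application data into each
 * segment.
 */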
1159 if (drv_buf->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE)
1161 if ((needed_desc + desc_count) > MPI3MR_NUM_IOCTL_SGE) {
1162 dprint_bsg_err(mrioc, "%s: DMA descriptor mapping error %d:%d:%d\n",
1163 __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE);
1166 drv_buf->dma_desc = kzalloc(sizeof(*drv_buf->dma_desc) * needed_desc,
1168 if (!drv_buf->dma_desc)
1170 for (i = 0; i < needed_desc; i++, desc_count++) {
1171 drv_buf->dma_desc[i].addr = mrioc->ioctl_sge[desc_count].addr;
1172 drv_buf->dma_desc[i].dma_addr =
1173 mrioc->ioctl_sge[desc_count].dma_addr;
1174 if (buf_len < mrioc->ioctl_sge[desc_count].size)
1175 drv_buf->dma_desc[i].size = buf_len;
1177 drv_buf->dma_desc[i].size =
1178 mrioc->ioctl_sge[desc_count].size;
1179 buf_len -= drv_buf->dma_desc[i].size;
1180 memset(drv_buf->dma_desc[i].addr, 0,
1181 mrioc->ioctl_sge[desc_count].size);
1182 if (drv_buf->data_dir == DMA_TO_DEVICE) {
1183 memcpy(drv_buf->dma_desc[i].addr,
1184 drv_buf->bsg_buf + copied_len,
1185 drv_buf->dma_desc[i].size);
1186 copied_len += drv_buf->dma_desc[i].size;
1189 drv_buf->num_dma_desc = needed_desc;
1193 * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler
1194 * @job: BSG job reference
1196 * This function is the top level handler for MPI pass through
1197 * commands. It does basic validation of the input data buffers,
1198 * identifies the given buffer types and MPI command, allocates
1199 * DMAable memory for the user given buffers, constructs the SGL
1200 * properly and passes the command to the firmware.
1202 * Once the MPI command is completed the driver copies the data
1203 * if any and reply, sense information to user provided buffers.
1204 * If the command is timed out then issues controller reset
1205 * prior to returning.
1207 * Return: 0 on success and proper error codes on failure
1210 static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
1212 long rval = -EINVAL;
1213 struct mpi3mr_ioc *mrioc = NULL;
1214 u8 *mpi_req = NULL, *sense_buff_k = NULL;
1215 u8 mpi_msg_size = 0;
1216 struct mpi3mr_bsg_packet *bsg_req = NULL;
1217 struct mpi3mr_bsg_mptcmd *karg;
1218 struct mpi3mr_buf_entry *buf_entries = NULL;
1219 struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL;
1220 u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0;
1221 u8 din_cnt = 0, dout_cnt = 0;
1222 u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF;
1223 u8 block_io = 0, nvme_fmt = 0, resp_code = 0;
1224 struct mpi3_request_header *mpi_header = NULL;
1225 struct mpi3_status_reply_descriptor *status_desc;
1226 struct mpi3_scsi_task_mgmt_request *tm_req;
1227 u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen;
1229 struct mpi3mr_tgt_dev *tgtdev;
1230 struct mpi3mr_stgt_priv_data *stgt_priv = NULL;
1231 struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL;
1232 u32 din_size = 0, dout_size = 0;
1233 u8 *din_buf = NULL, *dout_buf = NULL;
1234 u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL;
1235 u16 rmc_size = 0, desc_count = 0;
1237 bsg_req = job->request;
1238 karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd;
1240 mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id);
1244 if (!mrioc->ioctl_sges_allocated) {
1245 dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n",
1250 if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT)
1251 karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;
1253 mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL);
1256 mpi_header = (struct mpi3_request_header *)mpi_req;
1258 bufcnt = karg->buf_entry_list.num_of_entries;
1259 drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL);
1265 dout_buf = kzalloc(job->request_payload.payload_len,
1272 din_buf = kzalloc(job->reply_payload.payload_len,
1279 sg_copy_to_buffer(job->request_payload.sg_list,
1280 job->request_payload.sg_cnt,
1281 dout_buf, job->request_payload.payload_len);
1283 buf_entries = karg->buf_entry_list.buf_entry;
1284 sgl_din_iter = din_buf;
1285 sgl_dout_iter = dout_buf;
1286 drv_buf_iter = drv_bufs;
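/*
 * Classify each application supplied buffer entry: data-out style
 * buffers (MPI request, RAID mgmt command, DATA_OUT) are carved out
 * of the flattened request payload, data-in style buffers (MPI
 * reply, error response, RAID mgmt response, DATA_IN) reserve space
 * in the reply payload, and each entry's direction, address and
 * length are recorded for DMA mapping later.
 */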
1288 for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) {
1290 switch (buf_entries->buf_type) {
1291 case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD:
1292 sgl_iter = sgl_dout_iter;
1293 sgl_dout_iter += buf_entries->buf_len;
1294 drv_buf_iter->data_dir = DMA_TO_DEVICE;
1296 if ((count != 0) || !buf_entries->buf_len)
1299 case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP:
1300 sgl_iter = sgl_din_iter;
1301 sgl_din_iter += buf_entries->buf_len;
1302 drv_buf_iter->data_dir = DMA_FROM_DEVICE;
1304 if (count != 1 || !is_rmcb || !buf_entries->buf_len)
1307 case MPI3MR_BSG_BUFTYPE_DATA_IN:
1308 sgl_iter = sgl_din_iter;
1309 sgl_din_iter += buf_entries->buf_len;
1310 drv_buf_iter->data_dir = DMA_FROM_DEVICE;
1312 din_size += buf_entries->buf_len;
1313 if ((din_cnt > 1) && !is_rmcb)
1316 case MPI3MR_BSG_BUFTYPE_DATA_OUT:
1317 sgl_iter = sgl_dout_iter;
1318 sgl_dout_iter += buf_entries->buf_len;
1319 drv_buf_iter->data_dir = DMA_TO_DEVICE;
1321 dout_size += buf_entries->buf_len;
1322 if ((dout_cnt > 1) && !is_rmcb)
1325 case MPI3MR_BSG_BUFTYPE_MPI_REPLY:
1326 sgl_iter = sgl_din_iter;
1327 sgl_din_iter += buf_entries->buf_len;
1328 drv_buf_iter->data_dir = DMA_NONE;
1329 mpirep_offset = count;
1330 if (!buf_entries->buf_len)
1333 case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE:
1334 sgl_iter = sgl_din_iter;
1335 sgl_din_iter += buf_entries->buf_len;
1336 drv_buf_iter->data_dir = DMA_NONE;
1338 if (!buf_entries->buf_len)
1341 case MPI3MR_BSG_BUFTYPE_MPI_REQUEST:
1342 sgl_iter = sgl_dout_iter;
1343 sgl_dout_iter += buf_entries->buf_len;
1344 drv_buf_iter->data_dir = DMA_NONE;
1345 mpi_msg_size = buf_entries->buf_len;
1346 if ((!mpi_msg_size || (mpi_msg_size % 4)) ||
1347 (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) {
1348 dprint_bsg_err(mrioc, "%s: invalid MPI message size\n",
1353 memcpy(mpi_req, sgl_iter, buf_entries->buf_len);
1360 dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n",
1366 if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
1367 dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
1372 if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
1373 dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
1379 drv_buf_iter->bsg_buf = sgl_iter;
1380 drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
1383 if (is_rmcb && ((din_size + dout_size) > MPI3MR_MAX_APP_XFER_SIZE)) {
1384 dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size = %d, dout_size = %d\n",
1385 __func__, __LINE__, mpi_header->function, din_size,
1391 if (din_size > MPI3MR_MAX_APP_XFER_SIZE) {
1392 dprint_bsg_err(mrioc,
1393 "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
1394 __func__, __LINE__, mpi_header->function, din_size);
1398 if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) {
1399 dprint_bsg_err(mrioc,
1400 "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n",
1401 __func__, __LINE__, mpi_header->function, dout_size);
1406 if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
1407 if (din_size > MPI3MR_IOCTL_SGE_SIZE ||
1408 dout_size > MPI3MR_IOCTL_SGE_SIZE) {
1409 dprint_bsg_err(mrioc, "%s:%d: invalid message size passed:%d:%d:%d:%d\n",
1410 __func__, __LINE__, din_cnt, dout_cnt, din_size,
1417 drv_buf_iter = drv_bufs;
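/*
 * Attach kernel DMA memory to each buffer: the RAID management
 * command buffer (entry 0) borrows the ioctl chain SGE buffer, the
 * RAID management response buffer (entry 1) borrows the ioctl
 * response SGE buffer, and all other data buffers are mapped onto
 * the pre-allocated ioctl SGE pool.
 */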
1418 for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
1419 if (drv_buf_iter->data_dir == DMA_NONE)
1422 drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len;
1423 if (is_rmcb && !count) {
1424 drv_buf_iter->kern_buf_len =
1425 mrioc->ioctl_chain_sge.size;
1426 drv_buf_iter->kern_buf =
1427 mrioc->ioctl_chain_sge.addr;
1428 drv_buf_iter->kern_buf_dma =
1429 mrioc->ioctl_chain_sge.dma_addr;
1430 drv_buf_iter->dma_desc = NULL;
1431 drv_buf_iter->num_dma_desc = 0;
1432 memset(drv_buf_iter->kern_buf, 0,
1433 drv_buf_iter->kern_buf_len);
1434 tmplen = min(drv_buf_iter->kern_buf_len,
1435 drv_buf_iter->bsg_buf_len);
1437 memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
1438 } else if (is_rmrb && (count == 1)) {
1439 drv_buf_iter->kern_buf_len =
1440 mrioc->ioctl_resp_sge.size;
1441 drv_buf_iter->kern_buf =
1442 mrioc->ioctl_resp_sge.addr;
1443 drv_buf_iter->kern_buf_dma =
1444 mrioc->ioctl_resp_sge.dma_addr;
1445 drv_buf_iter->dma_desc = NULL;
1446 drv_buf_iter->num_dma_desc = 0;
1447 memset(drv_buf_iter->kern_buf, 0,
1448 drv_buf_iter->kern_buf_len);
1449 tmplen = min(drv_buf_iter->kern_buf_len,
1450 drv_buf_iter->bsg_buf_len);
1451 drv_buf_iter->kern_buf_len = tmplen;
1452 memset(drv_buf_iter->bsg_buf, 0,
1453 drv_buf_iter->bsg_buf_len);
1455 if (!drv_buf_iter->kern_buf_len)
1457 if (mpi3mr_map_data_buffer_dma(mrioc, drv_buf_iter, desc_count)) {
1459 dprint_bsg_err(mrioc, "%s:%d: mapping data buffers failed\n",
1460 __func__, __LINE__);
1463 desc_count += drv_buf_iter->num_dma_desc;
1467 if (erb_offset != 0xFF) {
1468 sense_buff_k = kzalloc(erbsz, GFP_KERNEL);
1469 if (!sense_buff_k) {
1475 if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) {
1476 rval = -ERESTARTSYS;
1479 if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) {
1481 dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
1482 mutex_unlock(&mrioc->bsg_cmds.mutex);
1485 if (mrioc->unrecoverable) {
1486 dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
1489 mutex_unlock(&mrioc->bsg_cmds.mutex);
1492 if (mrioc->reset_in_progress) {
1493 dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
1495 mutex_unlock(&mrioc->bsg_cmds.mutex);
1498 if (mrioc->stop_bsgs) {
1499 dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
1501 mutex_unlock(&mrioc->bsg_cmds.mutex);
1505 if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) {
1506 nvme_fmt = mpi3mr_get_nvme_data_fmt(
1507 (struct mpi3_nvme_encapsulated_request *)mpi_req);
1508 if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) {
1509 if (mpi3mr_build_nvme_prp(mrioc,
1510 (struct mpi3_nvme_encapsulated_request *)mpi_req,
1511 drv_bufs, bufcnt)) {
1513 mutex_unlock(&mrioc->bsg_cmds.mutex);
1516 } else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 ||
1517 nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) {
1518 if (mpi3mr_build_nvme_sgl(mrioc,
1519 (struct mpi3_nvme_encapsulated_request *)mpi_req,
1520 drv_bufs, bufcnt)) {
1522 mutex_unlock(&mrioc->bsg_cmds.mutex);
1526 dprint_bsg_err(mrioc,
1527 "%s:invalid NVMe command format\n", __func__);
1529 mutex_unlock(&mrioc->bsg_cmds.mutex);
1533 if (mpi3mr_bsg_build_sgl(mrioc, mpi_req, mpi_msg_size,
1534 drv_bufs, bufcnt, is_rmcb, is_rmrb,
1535 (dout_cnt + din_cnt))) {
1536 dprint_bsg_err(mrioc, "%s: sgl build failed\n", __func__);
1538 mutex_unlock(&mrioc->bsg_cmds.mutex);
1543 if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) {
1544 tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req;
1545 if (tm_req->task_type !=
1546 MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
1547 dev_handle = tm_req->dev_handle;
1552 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
1553 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
1554 stgt_priv = (struct mpi3mr_stgt_priv_data *)
1555 tgtdev->starget->hostdata;
1556 atomic_inc(&stgt_priv->block_io);
1557 mpi3mr_tgtdev_put(tgtdev);
1561 mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING;
1562 mrioc->bsg_cmds.is_waiting = 1;
1563 mrioc->bsg_cmds.callback = NULL;
1564 mrioc->bsg_cmds.is_sense = 0;
1565 mrioc->bsg_cmds.sensebuf = sense_buff_k;
1566 memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz);
1567 mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS);
1568 if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) {
1569 dprint_bsg_info(mrioc,
1570 "%s: posting bsg request to the controller\n", __func__);
1571 dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
1573 if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
1574 drv_buf_iter = &drv_bufs[0];
1575 dprint_dump(drv_buf_iter->kern_buf,
1576 rmc_size, "mpi3_mgmt_req");
1580 init_completion(&mrioc->bsg_cmds.done);
1581 rval = mpi3mr_admin_request_post(mrioc, mpi_req,
1582 MPI3MR_ADMIN_REQ_FRAME_SZ, 0);
1586 mrioc->bsg_cmds.is_waiting = 0;
1587 dprint_bsg_err(mrioc,
1588 "%s: posting bsg request is failed\n", __func__);
1592 wait_for_completion_timeout(&mrioc->bsg_cmds.done,
1593 (karg->timeout * HZ));
1594 if (block_io && stgt_priv)
1595 atomic_dec(&stgt_priv->block_io);
1596 if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) {
1597 mrioc->bsg_cmds.is_waiting = 0;
1599 if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)
1601 dprint_bsg_err(mrioc,
1602 "%s: bsg request timedout after %d seconds\n", __func__,
1604 if (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR) {
1605 dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
1607 if (mpi_header->function ==
1608 MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
1609 drv_buf_iter = &drv_bufs[0];
1610 dprint_dump(drv_buf_iter->kern_buf,
1611 rmc_size, "mpi3_mgmt_req");
1614 if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
1615 (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO))
1616 mpi3mr_issue_tm(mrioc,
1617 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
1618 mpi_header->function_dependent, 0,
1619 MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
1620 &mrioc->host_tm_cmds, &resp_code, NULL);
1621 if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) &&
1622 !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET))
1623 mpi3mr_soft_reset_handler(mrioc,
1624 MPI3MR_RESET_FROM_APP_TIMEOUT, 1);
1627 dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__);
1629 if (mrioc->prp_list_virt) {
1630 dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
1631 mrioc->prp_list_virt, mrioc->prp_list_dma);
1632 mrioc->prp_list_virt = NULL;
1635 if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1636 != MPI3_IOCSTATUS_SUCCESS) {
1637 dprint_bsg_info(mrioc,
1638 "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
1640 (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1641 mrioc->bsg_cmds.ioc_loginfo);
1644 if ((mpirep_offset != 0xFF) &&
1645 drv_bufs[mpirep_offset].bsg_buf_len) {
1646 drv_buf_iter = &drv_bufs[mpirep_offset];
1647 drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 +
1649 bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);
1651 if (!bsg_reply_buf) {
1655 if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) {
1656 bsg_reply_buf->mpi_reply_type =
1657 MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS;
1658 memcpy(bsg_reply_buf->reply_buf,
1659 mrioc->bsg_cmds.reply, mrioc->reply_sz);
1661 bsg_reply_buf->mpi_reply_type =
1662 MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS;
1663 status_desc = (struct mpi3_status_reply_descriptor *)
1664 bsg_reply_buf->reply_buf;
1665 status_desc->ioc_status = mrioc->bsg_cmds.ioc_status;
1666 status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo;
1668 tmplen = min(drv_buf_iter->kern_buf_len,
1669 drv_buf_iter->bsg_buf_len);
1670 memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen);
1673 if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf &&
1674 mrioc->bsg_cmds.is_sense) {
1675 drv_buf_iter = &drv_bufs[erb_offset];
1676 tmplen = min(erbsz, drv_buf_iter->bsg_buf_len);
1677 memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen);
1680 drv_buf_iter = drv_bufs;
1681 for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
1682 if (drv_buf_iter->data_dir == DMA_NONE)
1684 if ((count == 1) && is_rmrb) {
1685 memcpy(drv_buf_iter->bsg_buf,
1686 drv_buf_iter->kern_buf,
1687 drv_buf_iter->kern_buf_len);
1688 } else if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
1690 for (desc_count = 0;
1691 desc_count < drv_buf_iter->num_dma_desc;
1693 memcpy(((u8 *)drv_buf_iter->bsg_buf + tmplen),
1694 drv_buf_iter->dma_desc[desc_count].addr,
1695 drv_buf_iter->dma_desc[desc_count].size);
1697 drv_buf_iter->dma_desc[desc_count].size;
1704 job->reply_payload_rcv_len =
1705 sg_copy_from_buffer(job->reply_payload.sg_list,
1706 job->reply_payload.sg_cnt,
1707 din_buf, job->reply_payload.payload_len);
1709 mrioc->bsg_cmds.is_sense = 0;
1710 mrioc->bsg_cmds.sensebuf = NULL;
1711 mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED;
1712 mutex_unlock(&mrioc->bsg_cmds.mutex);
1714 kfree(sense_buff_k);
1719 drv_buf_iter = drv_bufs;
1720 for (count = 0; count < bufcnt; count++, drv_buf_iter++)
1721 kfree(drv_buf_iter->dma_desc);
1724 kfree(bsg_reply_buf);
1729 * mpi3mr_app_save_logdata - Save Log Data events
1730 * @mrioc: Adapter instance reference
1731 * @event_data: event data associated with log data event
1732 * @event_data_size: event data size to copy
1734 * If log data event caching is enabled by the applications,
1735 * then this function saves the log data in the circular queue
1736 * and sends the async signal SIGIO to the event monitoring
1737 * applications to indicate an async event from the firmware.
1741 void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
1742 u16 event_data_size)
1744 u32 index = mrioc->logdata_buf_idx, sz;
1745 struct mpi3mr_logdata_entry *entry;
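/*
 * Entries are stored in a fixed-size circular buffer; the write
 * index wraps at MPI3MR_BSG_LOGDATA_MAX_ENTRIES and the shared event
 * counter is bumped so that monitoring applications can be notified
 * of the new entry.
 */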
1747 if (!(mrioc->logdata_buf))
1750 entry = (struct mpi3mr_logdata_entry *)
1751 (mrioc->logdata_buf + (index * mrioc->logdata_entry_sz));
1752 entry->valid_entry = 1;
1753 sz = min(mrioc->logdata_entry_sz, event_data_size);
1754 memcpy(entry->data, event_data, sz);
1755 mrioc->logdata_buf_idx =
1756 ((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES);
1757 atomic64_inc(&event_counter);
1761 * mpi3mr_bsg_request - bsg request entry point
1762 * @job: BSG job reference
1764 * This is the driver's entry point for bsg requests
1766 * Return: 0 on success and proper error codes on failure
1768 static int mpi3mr_bsg_request(struct bsg_job *job)
1770 long rval = -EINVAL;
1771 unsigned int reply_payload_rcv_len = 0;
1773 struct mpi3mr_bsg_packet *bsg_req = job->request;
1775 switch (bsg_req->cmd_type) {
1776 case MPI3MR_DRV_CMD:
1777 rval = mpi3mr_bsg_process_drv_cmds(job);
1779 case MPI3MR_MPT_CMD:
1780 rval = mpi3mr_bsg_process_mpt_cmds(job);
1783 pr_err("%s: unsupported BSG command(0x%08x)\n",
1784 MPI3MR_DRIVER_NAME, bsg_req->cmd_type);
1788 bsg_job_done(job, rval, reply_payload_rcv_len);
1794 * mpi3mr_bsg_exit - de-registration from bsg layer
1795 * @mrioc: Adapter instance reference
1797 * This will be called during driver unload and all
1798 * bsg resources allocated during load will be freed.
1802 void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
1804 struct device *bsg_dev = &mrioc->bsg_dev;
1805 if (!mrioc->bsg_queue)
1808 bsg_remove_queue(mrioc->bsg_queue);
1809 mrioc->bsg_queue = NULL;
1811 device_del(bsg_dev);
1812 put_device(bsg_dev);
1816 * mpi3mr_bsg_node_release -release bsg device node
1817 * @dev: bsg device node
1819 * decrements the bsg device's parent reference count
1823 static void mpi3mr_bsg_node_release(struct device *dev)
1825 put_device(dev->parent);
1829 * mpi3mr_bsg_init - registration with bsg layer
1830 * @mrioc: Adapter instance reference
1832 * This will be called during driver load and it will
1833 * register the driver with the bsg layer.
1837 void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
1839 struct device *bsg_dev = &mrioc->bsg_dev;
1840 struct device *parent = &mrioc->shost->shost_gendev;
1842 device_initialize(bsg_dev);
1844 bsg_dev->parent = get_device(parent);
1845 bsg_dev->release = mpi3mr_bsg_node_release;
1847 dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);
1849 if (device_add(bsg_dev)) {
1850 ioc_err(mrioc, "%s: bsg device add failed\n",
1852 put_device(bsg_dev);
1856 mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev),
1857 mpi3mr_bsg_request, NULL, 0);
1858 if (IS_ERR(mrioc->bsg_queue)) {
1859 ioc_err(mrioc, "%s: bsg registration failed\n",
1861 device_del(bsg_dev);
1862 put_device(bsg_dev);
1866 blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS);
1867 blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS);
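/*
 * The queue registered above is exposed as a bsg character device
 * named mpi3mrctl<id> (typically /dev/bsg/mpi3mrctl<id>), which is
 * the node applications use to issue the driver and MPI pass-through
 * commands handled in this file.
 */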
1873 * version_fw_show - SysFS callback for firmware version read
1874 * @dev: class device
1875 * @attr: Device attributes
1876 * @buf: Buffer to copy
1878 * Return: sysfs_emit() return after copying firmware version
1881 version_fw_show(struct device *dev, struct device_attribute *attr,
1884 struct Scsi_Host *shost = class_to_shost(dev);
1885 struct mpi3mr_ioc *mrioc = shost_priv(shost);
1886 struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
1888 return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n",
1889 fwver->gen_major, fwver->gen_minor, fwver->ph_major,
1890 fwver->ph_minor, fwver->cust_id, fwver->build_num);
1892 static DEVICE_ATTR_RO(version_fw);
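/*
 * This and the following shost attributes are wired into
 * mpi3mr_host_groups below and typically appear under
 * /sys/class/scsi_host/host<n>/ for the controller's SCSI host.
 */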
1895 * fw_queue_depth_show - SysFS callback for firmware max cmds
1896 * @dev: class device
1897 * @attr: Device attributes
1898 * @buf: Buffer to copy
1900 * Return: sysfs_emit() return after copying firmware max commands
1903 fw_queue_depth_show(struct device *dev, struct device_attribute *attr,
1906 struct Scsi_Host *shost = class_to_shost(dev);
1907 struct mpi3mr_ioc *mrioc = shost_priv(shost);
1909 return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs);
1911 static DEVICE_ATTR_RO(fw_queue_depth);
1914 * op_req_q_count_show - SysFS callback for request queue count
1915 * @dev: class device
1916 * @attr: Device attributes
1917 * @buf: Buffer to copy
1919 * Return: sysfs_emit() return after copying request queue count
1922 op_req_q_count_show(struct device *dev, struct device_attribute *attr,
1925 struct Scsi_Host *shost = class_to_shost(dev);
1926 struct mpi3mr_ioc *mrioc = shost_priv(shost);
1928 return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q);
1930 static DEVICE_ATTR_RO(op_req_q_count);
1933 * reply_queue_count_show - SysFS callback for reply queue count
1934 * @dev: class device
1935 * @attr: Device attributes
1936 * @buf: Buffer to copy
1938 * Return: sysfs_emit() return after copying reply queue count
1941 reply_queue_count_show(struct device *dev, struct device_attribute *attr,
1944 struct Scsi_Host *shost = class_to_shost(dev);
1945 struct mpi3mr_ioc *mrioc = shost_priv(shost);
1947 return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q);
1950 static DEVICE_ATTR_RO(reply_queue_count);
1953 * logging_level_show - Show controller debug level
1954 * @dev: class device
1955 * @attr: Device attributes
1956 * @buf: Buffer to copy
1958 * A sysfs 'read/write' shost attribute, to show the current
1959 * debug log level used by the driver for the specific
1962 * Return: sysfs_emit() return
1965 logging_level_show(struct device *dev,
1966 struct device_attribute *attr, char *buf)
1969 struct Scsi_Host *shost = class_to_shost(dev);
1970 struct mpi3mr_ioc *mrioc = shost_priv(shost);
1972 return sysfs_emit(buf, "%08xh\n", mrioc->logging_level);
1976 * logging_level_store- Change controller debug level
1977 * @dev: class device
1978 * @attr: Device attributes
1979 * @buf: Buffer to copy
1980 * @count: size of the buffer
1982 * A sysfs 'read/write' shost attribute, to change the current
1983 * debug log level used by the driver for the specific
1986 * Return: strlen() return
1989 logging_level_store(struct device *dev,
1990 struct device_attribute *attr,
1991 const char *buf, size_t count)
1993 struct Scsi_Host *shost = class_to_shost(dev);
1994 struct mpi3mr_ioc *mrioc = shost_priv(shost);
1997 if (kstrtoint(buf, 0, &val) != 0)
2000 mrioc->logging_level = val;
2001 ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level);
2004 static DEVICE_ATTR_RW(logging_level);
2007 * adp_state_show() - SysFS callback for adapter state show
2008 * @dev: class device
2009 * @attr: Device attributes
2010 * @buf: Buffer to copy
2012 * Return: sysfs_emit() return after copying adapter state
2015 adp_state_show(struct device *dev, struct device_attribute *attr,
2018 struct Scsi_Host *shost = class_to_shost(dev);
2019 struct mpi3mr_ioc *mrioc = shost_priv(shost);
2020 enum mpi3mr_iocstate ioc_state;
2023 ioc_state = mpi3mr_get_iocstate(mrioc);
2024 if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
2025 adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
2026 else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
2027 adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
2028 else if (ioc_state == MRIOC_STATE_FAULT)
2029 adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
2031 adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
2033 return sysfs_emit(buf, "%u\n", adp_state);
2036 static DEVICE_ATTR_RO(adp_state);
2038 static struct attribute *mpi3mr_host_attrs[] = {
2039 &dev_attr_version_fw.attr,
2040 &dev_attr_fw_queue_depth.attr,
2041 &dev_attr_op_req_q_count.attr,
2042 &dev_attr_reply_queue_count.attr,
2043 &dev_attr_logging_level.attr,
2044 &dev_attr_adp_state.attr,
2048 static const struct attribute_group mpi3mr_host_attr_group = {
2049 .attrs = mpi3mr_host_attrs
2052 const struct attribute_group *mpi3mr_host_groups[] = {
2053 &mpi3mr_host_attr_group,
2059 * SCSI Device attributes under sysfs
2063 * sas_address_show - SysFS callback for dev SAS address display
2064 * @dev: class device
2065 * @attr: Device attributes
2066 * @buf: Buffer to copy
2068 * Return: sysfs_emit() return after copying SAS address of the
2069 * specific SAS/SATA end device.
2072 sas_address_show(struct device *dev, struct device_attribute *attr,
2075 struct scsi_device *sdev = to_scsi_device(dev);
2076 struct mpi3mr_sdev_priv_data *sdev_priv_data;
2077 struct mpi3mr_stgt_priv_data *tgt_priv_data;
2078 struct mpi3mr_tgt_dev *tgtdev;
2080 sdev_priv_data = sdev->hostdata;
2081 if (!sdev_priv_data)
2084 tgt_priv_data = sdev_priv_data->tgt_priv_data;
2087 tgtdev = tgt_priv_data->tgt_dev;
2088 if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA)
2090 return sysfs_emit(buf, "0x%016llx\n",
2091 (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address);
2094 static DEVICE_ATTR_RO(sas_address);
2097 * device_handle_show - SysFS callback for device handle display
2098 * @dev: class device
2099 * @attr: Device attributes
2100 * @buf: Buffer to copy
2102 * Return: sysfs_emit() return after copying firmware internal
2103 * device handle of the specific device.
2106 device_handle_show(struct device *dev, struct device_attribute *attr,
2109 struct scsi_device *sdev = to_scsi_device(dev);
2110 struct mpi3mr_sdev_priv_data *sdev_priv_data;
2111 struct mpi3mr_stgt_priv_data *tgt_priv_data;
2112 struct mpi3mr_tgt_dev *tgtdev;
2114 sdev_priv_data = sdev->hostdata;
2115 if (!sdev_priv_data)
2118 tgt_priv_data = sdev_priv_data->tgt_priv_data;
2121 tgtdev = tgt_priv_data->tgt_dev;
2124 return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle);
2127 static DEVICE_ATTR_RO(device_handle);
2130 * persistent_id_show - SysFS callback for persistent ID display
2131 * @dev: class device
2132 * @attr: Device attributes
2133 * @buf: Buffer to copy
2135 * Return: sysfs_emit() return after copying the persistent ID
2136 * of the specific device.
2139 persistent_id_show(struct device *dev, struct device_attribute *attr,
2142 struct scsi_device *sdev = to_scsi_device(dev);
2143 struct mpi3mr_sdev_priv_data *sdev_priv_data;
2144 struct mpi3mr_stgt_priv_data *tgt_priv_data;
2145 struct mpi3mr_tgt_dev *tgtdev;
2147 sdev_priv_data = sdev->hostdata;
2148 if (!sdev_priv_data)
2151 tgt_priv_data = sdev_priv_data->tgt_priv_data;
2154 tgtdev = tgt_priv_data->tgt_dev;
2157 return sysfs_emit(buf, "%d\n", tgtdev->perst_id);
2159 static DEVICE_ATTR_RO(persistent_id);
2161 static struct attribute *mpi3mr_dev_attrs[] = {
2162 &dev_attr_sas_address.attr,
2163 &dev_attr_device_handle.attr,
2164 &dev_attr_persistent_id.attr,
2168 static const struct attribute_group mpi3mr_dev_attr_group = {
2169 .attrs = mpi3mr_dev_attrs
2172 const struct attribute_group *mpi3mr_dev_groups[] = {
2173 &mpi3mr_dev_attr_group,