1 // SPDX-License-Identifier: GPL-2.0
3 * Universal Flash Storage Host Performance Booster
5 * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
8 * Yongmyung Lee <ymhungry.lee@samsung.com>
9 * Jinyoung Choi <j-young.choi@samsung.com>
12 #include <asm/unaligned.h>
13 #include <linux/async.h>
19 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
20 #define READ_TO_MS 1000
21 #define READ_TO_EXPIRIES 100
22 #define POLLING_INTERVAL_MS 200
23 #define THROTTLE_MAP_REQ_DEFAULT 1
25 /* memory management */
26 static struct kmem_cache *ufshpb_mctx_cache;
27 static mempool_t *ufshpb_mctx_pool;
28 static mempool_t *ufshpb_page_pool;
29 /* A cache size of 2MB can cache PPN entries covering a 1GB LBA range. */
30 static unsigned int ufshpb_host_map_kbytes = 2048;
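/*
 * A minimal sizing sketch (assuming the driver's 8-byte HPB_ENTRY_SIZE
 * and 4KB HPB_ENTRY_BLOCK_SIZE): 2MB / 8B = 256K PPN entries, and
 * 256K entries * 4KB per logical block = 1GB of covered LBA range.
 */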
31 static int tot_active_srgn_pages;
33 static struct workqueue_struct *ufshpb_wq;
35 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
38 bool ufshpb_is_allowed(struct ufs_hba *hba)
40 return !(hba->ufshpb_dev.hpb_disabled);
43 /* HPB version 1.0 is called the legacy version. */
44 bool ufshpb_is_legacy(struct ufs_hba *hba)
46 return hba->ufshpb_dev.is_legacy;
49 static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
51 return sdev->hostdata;
54 static int ufshpb_get_state(struct ufshpb_lu *hpb)
56 return atomic_read(&hpb->hpb_state);
59 static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
61 atomic_set(&hpb->hpb_state, state);
64 static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
65 struct ufshpb_subregion *srgn)
67 return rgn->rgn_state != HPB_RGN_INACTIVE &&
68 srgn->srgn_state == HPB_SRGN_VALID;
71 static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
73 return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
76 static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
78 return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
79 op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
82 static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
84 return transfer_len <= hpb->pre_req_max_tr_len;
87 static bool ufshpb_is_general_lun(int lun)
89 return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
92 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
94 if (hpb->lu_pinned_end != PINNED_NOT_SET &&
95 rgn_idx >= hpb->lu_pinned_start &&
96 rgn_idx <= hpb->lu_pinned_end)
102 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
107 if (ufshpb_get_state(hpb) != HPB_PRESENT)
110 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
111 if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
113 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
116 queue_work(ufshpb_wq, &hpb->map_work);
119 static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
120 struct ufshcd_lrb *lrbp,
121 struct utp_hpb_rsp *rsp_field)
123 /* Check HPB_UPDATE_ALERT */
124 if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
125 UPIU_HEADER_DWORD(0, 2, 0, 0)))
128 if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
129 rsp_field->desc_type != DEV_DES_TYPE ||
130 rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
131 rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
132 rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
133 rsp_field->hpb_op == HPB_RSP_NONE ||
134 (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
135 !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
138 if (!ufshpb_is_general_lun(rsp_field->lun)) {
139 dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
147 static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
148 int srgn_offset, int cnt, bool set_dirty)
150 struct ufshpb_region *rgn;
151 struct ufshpb_subregion *srgn, *prev_srgn = NULL;
157 rgn = hpb->rgn_tbl + rgn_idx;
158 srgn = rgn->srgn_tbl + srgn_idx;
160 if (likely(!srgn->is_last))
161 bitmap_len = hpb->entries_per_srgn;
163 bitmap_len = hpb->last_srgn_entries;
165 if ((srgn_offset + cnt) > bitmap_len)
166 set_bit_len = bitmap_len - srgn_offset;
170 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
171 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
173 if (srgn->srgn_state == HPB_SRGN_VALID)
174 bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
176 } else if (hpb->is_hcm) {
177 /* rewind the read timer for lru regions */
178 rgn->read_timeout = ktime_add_ms(ktime_get(),
179 rgn->hpb->params.read_timeout_ms);
180 rgn->read_timeout_expiries =
181 rgn->hpb->params.read_timeout_expiries;
184 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
186 if (hpb->is_hcm && prev_srgn != srgn) {
187 bool activate = false;
189 spin_lock(&rgn->rgn_lock);
191 rgn->reads -= srgn->reads;
193 set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
197 if (srgn->reads == hpb->params.activation_thld)
200 spin_unlock(&rgn->rgn_lock);
203 test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
204 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
205 ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
206 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
207 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
208 "activate region %d-%d\n", rgn_idx, srgn_idx);
215 if (++srgn_idx == hpb->srgns_per_rgn) {
225 static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
226 int srgn_idx, int srgn_offset, int cnt)
228 struct ufshpb_region *rgn;
229 struct ufshpb_subregion *srgn;
234 rgn = hpb->rgn_tbl + rgn_idx;
235 srgn = rgn->srgn_tbl + srgn_idx;
237 if (likely(!srgn->is_last))
238 bitmap_len = hpb->entries_per_srgn;
240 bitmap_len = hpb->last_srgn_entries;
242 if (!ufshpb_is_valid_srgn(rgn, srgn))
246 * If the region state is active, mctx must be allocated.
247 * In this case, check whether the region was evicted or
248 * the mctx allocation failed.
250 if (unlikely(!srgn->mctx)) {
251 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
252 "no mctx in region %d subregion %d.\n",
253 srgn->rgn_idx, srgn->srgn_idx);
257 if ((srgn_offset + cnt) > bitmap_len)
258 bit_len = bitmap_len - srgn_offset;
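/*
 * The chunk counts as dirty if any bit in [srgn_offset, srgn_offset +
 * bit_len) of ppn_dirty is set; find_next_bit() returns a value >= its
 * size argument when no set bit exists in that window.
 */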
262 if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
263 srgn_offset) < bit_len + srgn_offset)
267 if (++srgn_idx == hpb->srgns_per_rgn) {
279 static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
281 return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
284 static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
285 struct ufshpb_map_ctx *mctx, int pos,
286 int len, __be64 *ppn_buf)
292 index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
293 offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
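/*
 * Worked example (assuming 4KB PAGE_SIZE and 8-byte HPB_ENTRY_SIZE):
 * each m_page holds 512 entries, so pos = 1000 yields
 * index = 1000 / 512 = 1 and offset = 1000 % 512 = 488.
 */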
295 if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
298 copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
300 page = mctx->m_page[index];
301 if (unlikely(!page)) {
302 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
303 "error. cannot find page in mctx\n");
307 memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
308 copied * HPB_ENTRY_SIZE);
314 ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
315 int *srgn_idx, int *offset)
319 *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
320 rgn_offset = lpn & hpb->entries_per_rgn_mask;
321 *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
322 *offset = rgn_offset & hpb->entries_per_srgn_mask;
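/*
 * Worked example with a hypothetical geometry: for
 * entries_per_rgn_shift = 12 (4096 entries/region) and
 * entries_per_srgn_shift = 9 (512 entries/subregion), lpn = 5000
 * decomposes to rgn_idx = 1, srgn_idx = 1 and offset = 392.
 */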
326 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
327 __be64 ppn, u8 transfer_len)
329 unsigned char *cdb = lrbp->cmd->cmnd;
330 __be64 ppn_tmp = ppn;
331 cdb[0] = UFSHPB_READ;
333 if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
334 ppn_tmp = swab64(ppn);
336 /* ppn value is stored as big-endian in the host memory */
337 memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
338 cdb[14] = transfer_len;
341 lrbp->cmd->cmd_len = UFS_CDB_SIZE;
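/*
 * Resulting HPB READ CDB layout, as built above: byte 0 carries the
 * UFSHPB_READ opcode, bytes 6-13 the PPN entry (big-endian in host
 * memory, byte-swapped first on devices with the SWAP_L2P_ENTRY
 * quirk), and byte 14 the transfer length in logical blocks.
 */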
345 * This function sets up an HPB READ command using host-side L2P map data.
347 int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
349 struct ufshpb_lu *hpb;
350 struct ufshpb_region *rgn;
351 struct ufshpb_subregion *srgn;
352 struct scsi_cmnd *cmd = lrbp->cmd;
356 int transfer_len, rgn_idx, srgn_idx, srgn_offset;
359 hpb = ufshpb_get_hpb_data(cmd->device);
363 if (ufshpb_get_state(hpb) == HPB_INIT)
366 if (ufshpb_get_state(hpb) != HPB_PRESENT) {
367 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
368 "%s: ufshpb state is not PRESENT", __func__);
372 if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
373 (!ufshpb_is_write_or_discard(cmd) &&
374 !ufshpb_is_read_cmd(cmd)))
377 transfer_len = sectors_to_logical(cmd->device,
378 blk_rq_sectors(scsi_cmd_to_rq(cmd)));
379 if (unlikely(!transfer_len))
382 lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
383 ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
384 rgn = hpb->rgn_tbl + rgn_idx;
385 srgn = rgn->srgn_tbl + srgn_idx;
387 /* If the command type is WRITE or DISCARD, set the bitmap as dirty */
388 if (ufshpb_is_write_or_discard(cmd)) {
389 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
394 if (!ufshpb_is_supported_chunk(hpb, transfer_len))
399 * in host control mode, reads are the main source for activation trials
402 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
403 transfer_len, false);
405 /* keep those counters normalized */
406 if (rgn->reads > hpb->entries_per_srgn)
407 schedule_work(&hpb->ufshpb_normalization_work);
410 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
411 if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
413 hpb->stats.miss_cnt++;
414 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
418 err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
419 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
420 if (unlikely(err < 0)) {
422 * In this case, the region state is active,
423 * but the PPN table is not allocated.
424 * Make sure the PPN table is allocated while the region is in the active state.
427 dev_err(hba->dev, "get ppn failed. err %d\n", err);
431 ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
433 hpb->stats.hit_cnt++;
437 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
438 int rgn_idx, enum req_opf dir,
441 struct ufshpb_req *rq;
443 int retries = HPB_MAP_REQ_RETRIES;
445 rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
450 req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
453 if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
454 usleep_range(3000, 3100);
463 rq->rb.rgn_idx = rgn_idx;
468 kmem_cache_free(hpb->map_req_cache, rq);
472 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
474 blk_put_request(rq->req);
475 kmem_cache_free(hpb->map_req_cache, rq);
478 static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
479 struct ufshpb_subregion *srgn)
481 struct ufshpb_req *map_req;
486 hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
487 dev_info(&hpb->sdev_ufs_lu->sdev_dev,
488 "map_req throttle. inflight %d throttle %d",
489 hpb->num_inflight_map_req,
490 hpb->params.inflight_map_req);
494 map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
498 bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
500 ufshpb_put_req(hpb, map_req);
506 map_req->rb.srgn_idx = srgn->srgn_idx;
507 map_req->rb.mctx = srgn->mctx;
509 spin_lock_irqsave(&hpb->param_lock, flags);
510 hpb->num_inflight_map_req++;
511 spin_unlock_irqrestore(&hpb->param_lock, flags);
516 static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
517 struct ufshpb_req *map_req)
521 bio_put(map_req->bio);
522 ufshpb_put_req(hpb, map_req);
524 spin_lock_irqsave(&hpb->param_lock, flags);
525 hpb->num_inflight_map_req--;
526 spin_unlock_irqrestore(&hpb->param_lock, flags);
529 static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
530 struct ufshpb_subregion *srgn)
532 struct ufshpb_region *rgn;
533 u32 num_entries = hpb->entries_per_srgn;
536 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
537 "no mctx in region %d subregion %d.\n",
538 srgn->rgn_idx, srgn->srgn_idx);
542 if (unlikely(srgn->is_last))
543 num_entries = hpb->last_srgn_entries;
545 bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
547 rgn = hpb->rgn_tbl + srgn->rgn_idx;
548 clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
553 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
556 struct ufshpb_region *rgn;
557 struct ufshpb_subregion *srgn;
559 rgn = hpb->rgn_tbl + rgn_idx;
560 srgn = rgn->srgn_tbl + srgn_idx;
562 list_del_init(&rgn->list_inact_rgn);
564 if (list_empty(&srgn->list_act_srgn))
565 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
567 hpb->stats.rb_active_cnt++;
570 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
572 struct ufshpb_region *rgn;
573 struct ufshpb_subregion *srgn;
576 rgn = hpb->rgn_tbl + rgn_idx;
578 for_each_sub_region(rgn, srgn_idx, srgn)
579 list_del_init(&srgn->list_act_srgn);
581 if (list_empty(&rgn->list_inact_rgn))
582 list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
584 hpb->stats.rb_inactive_cnt++;
587 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
588 struct ufshpb_subregion *srgn)
590 struct ufshpb_region *rgn;
593 * If the subregion has no mctx after the I/O for
594 * HPB_READ_BUFFER has completed, the region to which the
595 * subregion belongs was evicted.
596 * Make sure the region is not evicted while I/O is in progress.
599 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
600 "no mctx in region %d subregion %d.\n",
601 srgn->rgn_idx, srgn->srgn_idx);
602 srgn->srgn_state = HPB_SRGN_INVALID;
606 rgn = hpb->rgn_tbl + srgn->rgn_idx;
608 if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
609 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
610 "region %d subregion %d evicted\n",
611 srgn->rgn_idx, srgn->srgn_idx);
612 srgn->srgn_state = HPB_SRGN_INVALID;
615 srgn->srgn_state = HPB_SRGN_VALID;
618 static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
620 struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
622 ufshpb_put_req(umap_req->hpb, umap_req);
625 static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
627 struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
628 struct ufshpb_lu *hpb = map_req->hpb;
629 struct ufshpb_subregion *srgn;
632 srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
633 map_req->rb.srgn_idx;
635 ufshpb_clear_dirty_bitmap(hpb, srgn);
636 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
637 ufshpb_activate_subregion(hpb, srgn);
638 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
640 ufshpb_put_map_req(map_req->hpb, map_req);
643 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
645 cdb[0] = UFSHPB_WRITE_BUFFER;
646 cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
647 UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
648 if (rgn)
649 put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
653 static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
654 int srgn_idx, int srgn_mem_size)
656 cdb[0] = UFSHPB_READ_BUFFER;
657 cdb[1] = UFSHPB_READ_BUFFER_ID;
659 put_unaligned_be16(rgn_idx, &cdb[2]);
660 put_unaligned_be16(srgn_idx, &cdb[4]);
661 put_unaligned_be24(srgn_mem_size, &cdb[6]);
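/*
 * The CDB built above asks the device for one subregion's worth of L2P
 * entries: region index at bytes 2-3, subregion index at bytes 4-5 and
 * the allocation length (srgn_mem_size) at bytes 6-8.
 */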
666 static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
667 struct ufshpb_req *umap_req,
668 struct ufshpb_region *rgn)
671 struct scsi_request *rq;
675 req->end_io_data = (void *)umap_req;
677 ufshpb_set_unmap_cmd(rq->cmd, rgn);
678 rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
680 blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
682 hpb->stats.umap_req_cnt++;
685 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
686 struct ufshpb_req *map_req, bool last)
688 struct request_queue *q;
690 struct scsi_request *rq;
691 int mem_size = hpb->srgn_mem_size;
695 q = hpb->sdev_ufs_lu->request_queue;
696 for (i = 0; i < hpb->pages_per_srgn; i++) {
697 ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
699 if (ret != PAGE_SIZE) {
700 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
701 "bio_add_pc_page fail %d - %d\n",
702 map_req->rb.rgn_idx, map_req->rb.srgn_idx);
709 blk_rq_append_bio(req, map_req->bio);
711 req->end_io_data = map_req;
716 mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
718 ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
719 map_req->rb.srgn_idx, mem_size);
720 rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
722 blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn);
724 hpb->stats.map_req_cnt++;
728 static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
731 struct ufshpb_map_ctx *mctx;
732 u32 num_entries = hpb->entries_per_srgn;
735 mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
739 mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
744 num_entries = hpb->last_srgn_entries;
746 mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
747 if (!mctx->ppn_dirty)
750 for (i = 0; i < hpb->pages_per_srgn; i++) {
751 mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
752 if (!mctx->m_page[i]) {
753 for (j = 0; j < i; j++)
754 mempool_free(mctx->m_page[j], ufshpb_page_pool);
755 goto release_ppn_dirty;
757 clear_page(page_address(mctx->m_page[i]));
763 bitmap_free(mctx->ppn_dirty);
765 kmem_cache_free(hpb->m_page_cache, mctx->m_page);
767 mempool_free(mctx, ufshpb_mctx_pool);
771 static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
772 struct ufshpb_map_ctx *mctx)
776 for (i = 0; i < hpb->pages_per_srgn; i++)
777 mempool_free(mctx->m_page[i], ufshpb_page_pool);
779 bitmap_free(mctx->ppn_dirty);
780 kmem_cache_free(hpb->m_page_cache, mctx->m_page);
781 mempool_free(mctx, ufshpb_mctx_pool);
784 static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
785 struct ufshpb_region *rgn)
787 struct ufshpb_subregion *srgn;
790 for_each_sub_region(rgn, srgn_idx, srgn)
791 if (srgn->srgn_state == HPB_SRGN_ISSUED)
797 static void ufshpb_read_to_handler(struct work_struct *work)
799 struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
800 ufshpb_read_to_work.work);
801 struct victim_select_info *lru_info = &hpb->lru_info;
802 struct ufshpb_region *rgn, *next_rgn;
805 LIST_HEAD(expired_list);
807 if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
810 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
812 list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
814 bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
817 rgn->read_timeout_expiries--;
818 if (is_rgn_dirty(rgn) ||
819 rgn->read_timeout_expiries == 0)
820 list_add(&rgn->list_expired_rgn, &expired_list);
822 rgn->read_timeout = ktime_add_ms(ktime_get(),
823 hpb->params.read_timeout_ms);
827 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
829 list_for_each_entry_safe(rgn, next_rgn, &expired_list,
831 list_del_init(&rgn->list_expired_rgn);
832 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
833 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
834 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
837 ufshpb_kick_map_work(hpb);
839 clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
841 poll = hpb->params.timeout_polling_interval_ms;
842 schedule_delayed_work(&hpb->ufshpb_read_to_work,
843 msecs_to_jiffies(poll));
846 static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
847 struct ufshpb_region *rgn)
849 rgn->rgn_state = HPB_RGN_ACTIVE;
850 list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
851 atomic_inc(&lru_info->active_cnt);
852 if (rgn->hpb->is_hcm) {
854 ktime_add_ms(ktime_get(),
855 rgn->hpb->params.read_timeout_ms);
856 rgn->read_timeout_expiries =
857 rgn->hpb->params.read_timeout_expiries;
861 static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
862 struct ufshpb_region *rgn)
864 list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
867 static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
869 struct victim_select_info *lru_info = &hpb->lru_info;
870 struct ufshpb_region *rgn, *victim_rgn = NULL;
872 list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
873 if (ufshpb_check_srgns_issue_state(hpb, rgn))
877 * in host control mode, verify that the exiting region has few enough reads
881 rgn->reads > hpb->params.eviction_thld_exit)
889 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
890 "%s: no region allocated\n",
896 static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
897 struct ufshpb_region *rgn)
899 list_del_init(&rgn->list_lru_rgn);
900 rgn->rgn_state = HPB_RGN_INACTIVE;
901 atomic_dec(&lru_info->active_cnt);
904 static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
905 struct ufshpb_subregion *srgn)
907 if (srgn->srgn_state != HPB_SRGN_UNUSED) {
908 ufshpb_put_map_ctx(hpb, srgn->mctx);
909 srgn->srgn_state = HPB_SRGN_UNUSED;
914 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
915 struct ufshpb_region *rgn,
918 struct ufshpb_req *umap_req;
919 int rgn_idx = rgn ? rgn->rgn_idx : 0;
921 umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
925 ufshpb_execute_umap_req(hpb, umap_req, rgn);
930 static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
931 struct ufshpb_region *rgn)
933 return ufshpb_issue_umap_req(hpb, rgn, true);
936 static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
938 return ufshpb_issue_umap_req(hpb, NULL, false);
941 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
942 struct ufshpb_region *rgn)
944 struct victim_select_info *lru_info;
945 struct ufshpb_subregion *srgn;
948 lru_info = &hpb->lru_info;
950 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
952 ufshpb_cleanup_lru_info(lru_info, rgn);
954 for_each_sub_region(rgn, srgn_idx, srgn)
955 ufshpb_purge_active_subregion(hpb, srgn);
958 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
963 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
964 if (rgn->rgn_state == HPB_RGN_PINNED) {
965 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
966 "pinned region cannot drop-out. region %d\n",
971 if (!list_empty(&rgn->list_lru_rgn)) {
972 if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
978 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
979 ret = ufshpb_issue_umap_single_req(hpb, rgn);
980 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
985 __ufshpb_evict_region(hpb, rgn);
988 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
992 static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
993 struct ufshpb_region *rgn,
994 struct ufshpb_subregion *srgn)
996 struct ufshpb_req *map_req;
1000 bool alloc_required = false;
1001 enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
1003 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1005 if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1006 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1007 "%s: ufshpb state is not PRESENT\n", __func__);
1011 if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
1012 (srgn->srgn_state == HPB_SRGN_INVALID)) {
1017 if (srgn->srgn_state == HPB_SRGN_UNUSED)
1018 alloc_required = true;
1021 * If the subregion is already in the ISSUED state,
1022 * a specific event (e.g., GC or wear-leveling) occurred in
1023 * the device and an HPB response for map loading was received.
1024 * In this case, after finishing the current HPB_READ_BUFFER,
1025 * the next HPB_READ_BUFFER is issued again to obtain the latest map data.
1028 if (srgn->srgn_state == HPB_SRGN_ISSUED)
1031 srgn->srgn_state = HPB_SRGN_ISSUED;
1032 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1034 if (alloc_required) {
1035 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1037 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1038 "get map_ctx failed. region %d - %d\n",
1039 rgn->rgn_idx, srgn->srgn_idx);
1040 state = HPB_SRGN_UNUSED;
1041 goto change_srgn_state;
1045 map_req = ufshpb_get_map_req(hpb, srgn);
1047 goto change_srgn_state;
1050 ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
1052 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1053 "%s: issue map_req failed: %d, region %d - %d\n",
1054 __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
1060 ufshpb_put_map_req(hpb, map_req);
1062 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1063 srgn->srgn_state = state;
1065 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1069 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1071 struct ufshpb_region *victim_rgn = NULL;
1072 struct victim_select_info *lru_info = &hpb->lru_info;
1073 unsigned long flags;
1076 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1078 * If the region already belongs to the lru_list, just move it
1079 * to the MRU (tail) end of the lru list, because the region
1080 * is already in the active state.
1082 if (!list_empty(&rgn->list_lru_rgn)) {
1083 ufshpb_hit_lru_info(lru_info, rgn);
1087 if (rgn->rgn_state == HPB_RGN_INACTIVE) {
1088 if (atomic_read(&lru_info->active_cnt) ==
1089 lru_info->max_lru_active_cnt) {
1091 * If the maximum number of active regions
1092 * is exceeded, evict the least recently used region.
1093 * This case may occur when the device responds
1094 * to the eviction information late.
1095 * It is okay to evict the least recently used region,
1096 * because the device can detect that the region is no longer
1097 * cached when the host stops issuing HPB_READ for it.
1099 * in host control mode, verify that the entering
1100 * region has enough reads
1103 rgn->reads < hpb->params.eviction_thld_enter) {
1108 victim_rgn = ufshpb_victim_lru_info(hpb);
1110 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1111 "cannot get victim region %s\n",
1112 hpb->is_hcm ? "" : "error");
1117 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1118 "LRU full (%d), choose victim %d\n",
1119 atomic_read(&lru_info->active_cnt),
1120 victim_rgn->rgn_idx);
1123 spin_unlock_irqrestore(&hpb->rgn_state_lock,
1125 ret = ufshpb_issue_umap_single_req(hpb,
1127 spin_lock_irqsave(&hpb->rgn_state_lock,
1133 __ufshpb_evict_region(hpb, victim_rgn);
1137 * When a region is added to the lru_info list_head,
1138 * it is guaranteed that all of its subregions have been
1139 * assigned an mctx. If that failed, try to allocate the mctx
1140 * again without adding the region to the lru_info list_head.
1142 ufshpb_add_lru_info(lru_info, rgn);
1145 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1149 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
1150 struct utp_hpb_rsp *rsp_field)
1152 struct ufshpb_region *rgn;
1153 struct ufshpb_subregion *srgn;
1154 int i, rgn_i, srgn_i;
1156 BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
1158 * If the active region and the inactive region are the same,
1159 * we will inactivate this region.
1160 * The device can check this (region inactivated) and
1161 * will respond with the proper active region information.
1163 for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
1165 be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
1167 be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
1169 rgn = hpb->rgn_tbl + rgn_i;
1171 (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
1173 * in host control mode, subregion activation
1174 * recommendations are only allowed for active regions.
1175 * Also, ignore recommendations for dirty regions - the
1176 * host will make decisions concerning those by itself.
1181 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1182 "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
1184 spin_lock(&hpb->rsp_list_lock);
1185 ufshpb_update_active_info(hpb, rgn_i, srgn_i);
1186 spin_unlock(&hpb->rsp_list_lock);
1188 srgn = rgn->srgn_tbl + srgn_i;
1190 /* blocking HPB_READ */
1191 spin_lock(&hpb->rgn_state_lock);
1192 if (srgn->srgn_state == HPB_SRGN_VALID)
1193 srgn->srgn_state = HPB_SRGN_INVALID;
1194 spin_unlock(&hpb->rgn_state_lock);
1199 * in host control mode the device is not allowed to inactivate regions
1205 for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
1206 rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
1207 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1208 "inactivate(%d) region %d\n", i, rgn_i);
1210 spin_lock(&hpb->rsp_list_lock);
1211 ufshpb_update_inactive_info(hpb, rgn_i);
1212 spin_unlock(&hpb->rsp_list_lock);
1214 rgn = hpb->rgn_tbl + rgn_i;
1216 spin_lock(&hpb->rgn_state_lock);
1217 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1218 for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
1219 srgn = rgn->srgn_tbl + srgn_i;
1220 if (srgn->srgn_state == HPB_SRGN_VALID)
1221 srgn->srgn_state = HPB_SRGN_INVALID;
1224 spin_unlock(&hpb->rgn_state_lock);
1229 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
1230 rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
1232 if (ufshpb_get_state(hpb) == HPB_PRESENT)
1233 queue_work(ufshpb_wq, &hpb->map_work);
1236 static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
1238 struct victim_select_info *lru_info = &hpb->lru_info;
1239 struct ufshpb_region *rgn;
1240 unsigned long flags;
1242 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1244 list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1245 set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
1247 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1251 * This function parses the recommended active subregion information from the
1252 * sense data field of a response UPIU with SAM_STAT_GOOD status.
1254 void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1256 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1257 struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1260 data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1261 & MASK_RSP_UPIU_DATA_SEG_LEN;
1263 /* If data segment length is zero, rsp_field is not valid */
1267 if (unlikely(lrbp->lun != rsp_field->lun)) {
1268 struct scsi_device *sdev;
1271 __shost_for_each_device(sdev, hba->host) {
1272 hpb = ufshpb_get_hpb_data(sdev);
1277 if (rsp_field->lun == hpb->lun) {
1290 if (ufshpb_get_state(hpb) == HPB_INIT)
1293 if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1294 (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1295 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1296 "%s: ufshpb state is not PRESENT/SUSPEND\n",
1301 BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1303 if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1306 hpb->stats.rb_noti_cnt++;
1308 switch (rsp_field->hpb_op) {
1309 case HPB_RSP_REQ_REGION_UPDATE:
1310 if (data_seg_len != DEV_DATA_SEG_LEN)
1311 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1312 "%s: data seg length is not same.\n",
1314 ufshpb_rsp_req_region_update(hpb, rsp_field);
1316 case HPB_RSP_DEV_RESET:
1317 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1318 "UFS device lost HPB information during PM.\n");
1321 struct scsi_device *sdev;
1323 __shost_for_each_device(sdev, hba->host) {
1324 struct ufshpb_lu *h = sdev->hostdata;
1327 ufshpb_dev_reset_handler(h);
1333 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1334 "hpb_op is not available: %d\n",
1340 static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1341 struct ufshpb_region *rgn,
1342 struct ufshpb_subregion *srgn)
1344 if (!list_empty(&rgn->list_inact_rgn))
1347 if (!list_empty(&srgn->list_act_srgn)) {
1348 list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1352 list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1355 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1356 struct ufshpb_region *rgn,
1357 struct list_head *pending_list)
1359 struct ufshpb_subregion *srgn;
1362 if (!list_empty(&rgn->list_inact_rgn))
1365 for_each_sub_region(rgn, srgn_idx, srgn)
1366 if (!list_empty(&srgn->list_act_srgn))
1369 list_add_tail(&rgn->list_inact_rgn, pending_list);
1372 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1374 struct ufshpb_region *rgn;
1375 struct ufshpb_subregion *srgn;
1376 unsigned long flags;
1379 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1380 while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1381 struct ufshpb_subregion,
1383 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1386 list_del_init(&srgn->list_act_srgn);
1387 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1389 rgn = hpb->rgn_tbl + srgn->rgn_idx;
1390 ret = ufshpb_add_region(hpb, rgn);
1394 ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1396 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1397 "issue map_req failed. ret %d, region %d - %d\n",
1398 ret, rgn->rgn_idx, srgn->srgn_idx);
1401 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1403 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1407 dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1408 rgn->rgn_idx, srgn->srgn_idx);
1409 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1410 ufshpb_add_active_list(hpb, rgn, srgn);
1411 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1414 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1416 struct ufshpb_region *rgn;
1417 unsigned long flags;
1419 LIST_HEAD(pending_list);
1421 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1422 while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1423 struct ufshpb_region,
1425 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1428 list_del_init(&rgn->list_inact_rgn);
1429 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1431 ret = ufshpb_evict_region(hpb, rgn);
1433 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1434 ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1435 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1438 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1441 list_splice(&pending_list, &hpb->lh_inact_rgn);
1442 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1445 static void ufshpb_normalization_work_handler(struct work_struct *work)
1447 struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1448 ufshpb_normalization_work);
1450 u8 factor = hpb->params.normalization_factor;
1452 for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1453 struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1456 spin_lock(&rgn->rgn_lock);
1458 for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1459 struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1461 srgn->reads >>= factor;
1462 rgn->reads += srgn->reads;
1464 spin_unlock(&rgn->rgn_lock);
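/*
 * Example of the decay above: with the default normalization_factor
 * of 1, each pass halves every subregion's read counter, so a region
 * that is no longer read decays toward zero and becomes an
 * inactivation candidate in the check below.
 */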
1466 if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1469 /* if region is active but has no reads - inactivate it */
1470 spin_lock(&hpb->rsp_list_lock);
1471 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1472 spin_unlock(&hpb->rsp_list_lock);
1476 static void ufshpb_map_work_handler(struct work_struct *work)
1478 struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1480 if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1481 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1482 "%s: ufshpb state is not PRESENT\n", __func__);
1486 ufshpb_run_inactive_region_list(hpb);
1487 ufshpb_run_active_subregion_list(hpb);
1491 * This function does not need to hold any locks
1492 * (rgn_state_lock, rsp_list_lock, etc.) because it is only called during init.
1494 static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1495 struct ufshpb_lu *hpb,
1496 struct ufshpb_region *rgn)
1498 struct ufshpb_subregion *srgn;
1502 for_each_sub_region(rgn, srgn_idx, srgn) {
1503 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1504 srgn->srgn_state = HPB_SRGN_INVALID;
1508 "alloc mctx for pinned region failed\n");
1512 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1515 rgn->rgn_state = HPB_RGN_PINNED;
1519 for (i = 0; i < srgn_idx; i++) {
1520 srgn = rgn->srgn_tbl + i;
1521 ufshpb_put_map_ctx(hpb, srgn->mctx);
1526 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1527 struct ufshpb_region *rgn, bool last)
1530 struct ufshpb_subregion *srgn;
1532 for_each_sub_region(rgn, srgn_idx, srgn) {
1533 INIT_LIST_HEAD(&srgn->list_act_srgn);
1535 srgn->rgn_idx = rgn->rgn_idx;
1536 srgn->srgn_idx = srgn_idx;
1537 srgn->srgn_state = HPB_SRGN_UNUSED;
1540 if (unlikely(last && hpb->last_srgn_entries))
1541 srgn->is_last = true;
1544 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1545 struct ufshpb_region *rgn, int srgn_cnt)
1547 rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1552 rgn->srgn_cnt = srgn_cnt;
1556 static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1557 struct ufshpb_lu *hpb,
1558 struct ufshpb_dev_info *hpb_dev_info,
1559 struct ufshpb_lu_info *hpb_lu_info)
1561 u32 entries_per_rgn;
1562 u64 rgn_mem_size, tmp;
1564 if (ufshpb_is_legacy(hba))
1565 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1567 hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
1569 hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1570 hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1571 (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1573 hpb->lru_info.max_lru_active_cnt =
1574 hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1576 rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1578 do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1579 hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1580 * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
1583 do_div(tmp, HPB_ENTRY_SIZE);
1584 entries_per_rgn = (u32)tmp;
1585 hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1586 hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1588 hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1589 hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1590 hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1593 do_div(tmp, hpb->srgn_mem_size);
1594 hpb->srgns_per_rgn = (int)tmp;
1596 hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1598 hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1599 (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1600 hpb->last_srgn_entries = hpb_lu_info->num_blocks
1601 % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1603 hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
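/*
 * Sizing sketch with a hypothetical geometry: a 16MiB region with 4KiB
 * entry blocks and 8-byte entries maps 4096 blocks, i.e. rgn_mem_size =
 * 32KiB; a 4KiB srgn_mem_size then gives srgns_per_rgn = 8,
 * entries_per_srgn = 512 and pages_per_srgn = 1 on 4KiB pages.
 */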
1605 if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
1609 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1611 struct ufshpb_region *rgn_table, *rgn;
1615 rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1620 for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1621 int srgn_cnt = hpb->srgns_per_rgn;
1622 bool last_srgn = false;
1624 rgn = rgn_table + rgn_idx;
1625 rgn->rgn_idx = rgn_idx;
1627 spin_lock_init(&rgn->rgn_lock);
1629 INIT_LIST_HEAD(&rgn->list_inact_rgn);
1630 INIT_LIST_HEAD(&rgn->list_lru_rgn);
1631 INIT_LIST_HEAD(&rgn->list_expired_rgn);
1633 if (rgn_idx == hpb->rgns_per_lu - 1) {
1634 srgn_cnt = ((hpb->srgns_per_lu - 1) %
1635 hpb->srgns_per_rgn) + 1;
1639 ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1641 goto release_srgn_table;
1642 ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1644 if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1645 ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1647 goto release_srgn_table;
1649 rgn->rgn_state = HPB_RGN_INACTIVE;
1656 hpb->rgn_tbl = rgn_table;
1661 for (i = 0; i <= rgn_idx; i++)
1662 kvfree(rgn_table[i].srgn_tbl);
1668 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1669 struct ufshpb_region *rgn)
1672 struct ufshpb_subregion *srgn;
1674 for_each_sub_region(rgn, srgn_idx, srgn)
1675 if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1676 srgn->srgn_state = HPB_SRGN_UNUSED;
1677 ufshpb_put_map_ctx(hpb, srgn->mctx);
1681 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1685 for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1686 struct ufshpb_region *rgn;
1688 rgn = hpb->rgn_tbl + rgn_idx;
1689 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1690 rgn->rgn_state = HPB_RGN_INACTIVE;
1692 ufshpb_destroy_subregion_tbl(hpb, rgn);
1695 kvfree(rgn->srgn_tbl);
1698 kvfree(hpb->rgn_tbl);
1701 /* SYSFS functions */
1702 #define ufshpb_sysfs_attr_show_func(__name) \
1703 static ssize_t __name##_show(struct device *dev, \
1704 struct device_attribute *attr, char *buf) \
1706 struct scsi_device *sdev = to_scsi_device(dev); \
1707 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
1712 return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \
1715 static DEVICE_ATTR_RO(__name)
1717 ufshpb_sysfs_attr_show_func(hit_cnt);
1718 ufshpb_sysfs_attr_show_func(miss_cnt);
1719 ufshpb_sysfs_attr_show_func(rb_noti_cnt);
1720 ufshpb_sysfs_attr_show_func(rb_active_cnt);
1721 ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
1722 ufshpb_sysfs_attr_show_func(map_req_cnt);
1723 ufshpb_sysfs_attr_show_func(umap_req_cnt);
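/*
 * Each invocation above expands to a <name>_show() handler plus a
 * read-only device attribute. Through the "hpb_stats" group below, the
 * counters surface under the scsi device's sysfs directory, e.g.
 * .../hpb_stats/hit_cnt (exact path depends on the sysfs layout).
 */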
1725 static struct attribute *hpb_dev_stat_attrs[] = {
1726 &dev_attr_hit_cnt.attr,
1727 &dev_attr_miss_cnt.attr,
1728 &dev_attr_rb_noti_cnt.attr,
1729 &dev_attr_rb_active_cnt.attr,
1730 &dev_attr_rb_inactive_cnt.attr,
1731 &dev_attr_map_req_cnt.attr,
1732 &dev_attr_umap_req_cnt.attr,
1736 struct attribute_group ufs_sysfs_hpb_stat_group = {
1737 .name = "hpb_stats",
1738 .attrs = hpb_dev_stat_attrs,
1741 /* SYSFS functions */
1742 #define ufshpb_sysfs_param_show_func(__name) \
1743 static ssize_t __name##_show(struct device *dev, \
1744 struct device_attribute *attr, char *buf) \
1746 struct scsi_device *sdev = to_scsi_device(dev); \
1747 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
1752 return sysfs_emit(buf, "%d\n", hpb->params.__name); \
1755 ufshpb_sysfs_param_show_func(requeue_timeout_ms);
1757 requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1758 const char *buf, size_t count)
1760 struct scsi_device *sdev = to_scsi_device(dev);
1761 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1767 if (kstrtouint(buf, 0, &val))
1773 hpb->params.requeue_timeout_ms = val;
1777 static DEVICE_ATTR_RW(requeue_timeout_ms);
1779 ufshpb_sysfs_param_show_func(activation_thld);
1781 activation_thld_store(struct device *dev, struct device_attribute *attr,
1782 const char *buf, size_t count)
1784 struct scsi_device *sdev = to_scsi_device(dev);
1785 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1794 if (kstrtouint(buf, 0, &val))
1800 hpb->params.activation_thld = val;
1804 static DEVICE_ATTR_RW(activation_thld);
1806 ufshpb_sysfs_param_show_func(normalization_factor);
1808 normalization_factor_store(struct device *dev, struct device_attribute *attr,
1809 const char *buf, size_t count)
1811 struct scsi_device *sdev = to_scsi_device(dev);
1812 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1821 if (kstrtouint(buf, 0, &val))
1824 if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
1827 hpb->params.normalization_factor = val;
1831 static DEVICE_ATTR_RW(normalization_factor);
1833 ufshpb_sysfs_param_show_func(eviction_thld_enter);
1835 eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
1836 const char *buf, size_t count)
1838 struct scsi_device *sdev = to_scsi_device(dev);
1839 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1848 if (kstrtouint(buf, 0, &val))
1851 if (val <= hpb->params.eviction_thld_exit)
1854 hpb->params.eviction_thld_enter = val;
1858 static DEVICE_ATTR_RW(eviction_thld_enter);
1860 ufshpb_sysfs_param_show_func(eviction_thld_exit);
1862 eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
1863 const char *buf, size_t count)
1865 struct scsi_device *sdev = to_scsi_device(dev);
1866 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1875 if (kstrtouint(buf, 0, &val))
1878 if (val <= hpb->params.activation_thld)
1881 hpb->params.eviction_thld_exit = val;
1885 static DEVICE_ATTR_RW(eviction_thld_exit);
1887 ufshpb_sysfs_param_show_func(read_timeout_ms);
1889 read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1890 const char *buf, size_t count)
1892 struct scsi_device *sdev = to_scsi_device(dev);
1893 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1902 if (kstrtouint(buf, 0, &val))
1905 /* read_timeout must be much greater than timeout_polling_interval */
1906 if (val < hpb->params.timeout_polling_interval_ms * 2)
1909 hpb->params.read_timeout_ms = val;
1913 static DEVICE_ATTR_RW(read_timeout_ms);
1915 ufshpb_sysfs_param_show_func(read_timeout_expiries);
1917 read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
1918 const char *buf, size_t count)
1920 struct scsi_device *sdev = to_scsi_device(dev);
1921 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1930 if (kstrtouint(buf, 0, &val))
1936 hpb->params.read_timeout_expiries = val;
1940 static DEVICE_ATTR_RW(read_timeout_expiries);
1942 ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
1944 timeout_polling_interval_ms_store(struct device *dev,
1945 struct device_attribute *attr,
1946 const char *buf, size_t count)
1948 struct scsi_device *sdev = to_scsi_device(dev);
1949 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1958 if (kstrtouint(buf, 0, &val))
1961 /* timeout_polling_interval must be much less than read_timeout */
1962 if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
1965 hpb->params.timeout_polling_interval_ms = val;
1969 static DEVICE_ATTR_RW(timeout_polling_interval_ms);
1971 ufshpb_sysfs_param_show_func(inflight_map_req);
1972 static ssize_t inflight_map_req_store(struct device *dev,
1973 struct device_attribute *attr,
1974 const char *buf, size_t count)
1976 struct scsi_device *sdev = to_scsi_device(dev);
1977 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1986 if (kstrtouint(buf, 0, &val))
1989 if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
1992 hpb->params.inflight_map_req = val;
1996 static DEVICE_ATTR_RW(inflight_map_req);
1998 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
2000 hpb->params.activation_thld = ACTIVATION_THRESHOLD;
2001 hpb->params.normalization_factor = 1;
2002 hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2003 hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2004 hpb->params.read_timeout_ms = READ_TO_MS;
2005 hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2006 hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2007 hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
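/*
 * Derived defaults from ACTIVATION_THRESHOLD = 8: eviction_thld_enter =
 * 8 << 5 = 256 reads and eviction_thld_exit = 8 << 4 = 128 reads, which
 * preserves the ordering activation_thld < eviction_thld_exit <
 * eviction_thld_enter that the sysfs store handlers enforce.
 */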
2010 static struct attribute *hpb_dev_param_attrs[] = {
2011 &dev_attr_requeue_timeout_ms.attr,
2012 &dev_attr_activation_thld.attr,
2013 &dev_attr_normalization_factor.attr,
2014 &dev_attr_eviction_thld_enter.attr,
2015 &dev_attr_eviction_thld_exit.attr,
2016 &dev_attr_read_timeout_ms.attr,
2017 &dev_attr_read_timeout_expiries.attr,
2018 &dev_attr_timeout_polling_interval_ms.attr,
2019 &dev_attr_inflight_map_req.attr,
2023 struct attribute_group ufs_sysfs_hpb_param_group = {
2024 .name = "hpb_params",
2025 .attrs = hpb_dev_param_attrs,
2028 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2030 struct ufshpb_req *pre_req = NULL, *t;
2031 int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2034 INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2036 hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2037 hpb->throttle_pre_req = qd;
2038 hpb->num_inflight_pre_req = 0;
2043 for (i = 0; i < qd; i++) {
2044 pre_req = hpb->pre_req + i;
2045 INIT_LIST_HEAD(&pre_req->list_req);
2046 pre_req->req = NULL;
2048 pre_req->bio = bio_alloc(GFP_KERNEL, 1);
2052 pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2053 if (!pre_req->wb.m_page) {
2054 bio_put(pre_req->bio);
2058 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2063 list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2064 list_del_init(&pre_req->list_req);
2065 bio_put(pre_req->bio);
2066 __free_page(pre_req->wb.m_page);
2069 kfree(hpb->pre_req);
2073 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2075 struct ufshpb_req *pre_req = NULL;
2078 for (i = 0; i < hpb->throttle_pre_req; i++) {
2079 pre_req = hpb->pre_req + i;
2080 bio_put(hpb->pre_req[i].bio);
2081 if (pre_req->wb.m_page)
2082 __free_page(hpb->pre_req[i].wb.m_page);
2083 list_del_init(&pre_req->list_req);
2086 kfree(hpb->pre_req);
2089 static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2091 hpb->stats.hit_cnt = 0;
2092 hpb->stats.miss_cnt = 0;
2093 hpb->stats.rb_noti_cnt = 0;
2094 hpb->stats.rb_active_cnt = 0;
2095 hpb->stats.rb_inactive_cnt = 0;
2096 hpb->stats.map_req_cnt = 0;
2097 hpb->stats.umap_req_cnt = 0;
2100 static void ufshpb_param_init(struct ufshpb_lu *hpb)
2102 hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2104 ufshpb_hcm_param_init(hpb);
2107 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2111 spin_lock_init(&hpb->rgn_state_lock);
2112 spin_lock_init(&hpb->rsp_list_lock);
2113 spin_lock_init(&hpb->param_lock);
2115 INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2116 INIT_LIST_HEAD(&hpb->lh_act_srgn);
2117 INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2118 INIT_LIST_HEAD(&hpb->list_hpb_lu);
2120 INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2122 INIT_WORK(&hpb->ufshpb_normalization_work,
2123 ufshpb_normalization_work_handler);
2124 INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2125 ufshpb_read_to_handler);
2128 hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2129 sizeof(struct ufshpb_req), 0, 0, NULL);
2130 if (!hpb->map_req_cache) {
2131 dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
2136 hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2137 sizeof(struct page *) * hpb->pages_per_srgn,
2139 if (!hpb->m_page_cache) {
2140 dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
2143 goto release_req_cache;
2146 ret = ufshpb_pre_req_mempool_init(hpb);
2148 dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
2150 goto release_m_page_cache;
2153 ret = ufshpb_alloc_region_tbl(hba, hpb);
2155 goto release_pre_req_mempool;
2157 ufshpb_stat_init(hpb);
2158 ufshpb_param_init(hpb);
2163 poll = hpb->params.timeout_polling_interval_ms;
2164 schedule_delayed_work(&hpb->ufshpb_read_to_work,
2165 msecs_to_jiffies(poll));
2170 release_pre_req_mempool:
2171 ufshpb_pre_req_mempool_destroy(hpb);
2172 release_m_page_cache:
2173 kmem_cache_destroy(hpb->m_page_cache);
2175 kmem_cache_destroy(hpb->map_req_cache);
2179 static struct ufshpb_lu *
2180 ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2181 struct ufshpb_dev_info *hpb_dev_info,
2182 struct ufshpb_lu_info *hpb_lu_info)
2184 struct ufshpb_lu *hpb;
2187 hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2191 hpb->lun = sdev->lun;
2192 hpb->sdev_ufs_lu = sdev;
2194 ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2196 ret = ufshpb_lu_hpb_init(hba, hpb);
2198 dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2202 sdev->hostdata = hpb;
2210 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2212 struct ufshpb_region *rgn, *next_rgn;
2213 struct ufshpb_subregion *srgn, *next_srgn;
2214 unsigned long flags;
2217 * If a device reset occurred, the remaining HPB region information
2218 * may be stale. Therefore, discard the HPB response lists that
2219 * remain after reset to prevent unnecessary work.
2221 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2222 list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2224 list_del_init(&rgn->list_inact_rgn);
2226 list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2228 list_del_init(&srgn->list_act_srgn);
2229 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2232 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2235 cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2236 cancel_work_sync(&hpb->ufshpb_normalization_work);
2238 cancel_work_sync(&hpb->map_work);
2241 static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
2244 bool flag_res = true;
2247 /* wait for the device to complete HPB reset query */
2248 for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2250 "%s start flag reset polling %d times\n",
2253 /* Poll until the fHpbReset flag is cleared */
2254 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2255 QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
2259 "%s reading fHpbReset flag failed with error %d\n",
2267 usleep_range(1000, 1100);
2271 "%s fHpbReset was not cleared by the device\n",
2278 void ufshpb_reset(struct ufs_hba *hba)
2280 struct ufshpb_lu *hpb;
2281 struct scsi_device *sdev;
2283 shost_for_each_device(sdev, hba->host) {
2284 hpb = ufshpb_get_hpb_data(sdev);
2288 if (ufshpb_get_state(hpb) != HPB_RESET)
2291 ufshpb_set_state(hpb, HPB_PRESENT);
2295 void ufshpb_reset_host(struct ufs_hba *hba)
2297 struct ufshpb_lu *hpb;
2298 struct scsi_device *sdev;
2300 shost_for_each_device(sdev, hba->host) {
2301 hpb = ufshpb_get_hpb_data(sdev);
2305 if (ufshpb_get_state(hpb) != HPB_PRESENT)
2307 ufshpb_set_state(hpb, HPB_RESET);
2308 ufshpb_cancel_jobs(hpb);
2309 ufshpb_discard_rsp_lists(hpb);
2313 void ufshpb_suspend(struct ufs_hba *hba)
2315 struct ufshpb_lu *hpb;
2316 struct scsi_device *sdev;
2318 shost_for_each_device(sdev, hba->host) {
2319 hpb = ufshpb_get_hpb_data(sdev);
2323 if (ufshpb_get_state(hpb) != HPB_PRESENT)
2325 ufshpb_set_state(hpb, HPB_SUSPEND);
2326 ufshpb_cancel_jobs(hpb);
2330 void ufshpb_resume(struct ufs_hba *hba)
2332 struct ufshpb_lu *hpb;
2333 struct scsi_device *sdev;
2335 shost_for_each_device(sdev, hba->host) {
2336 hpb = ufshpb_get_hpb_data(sdev);
2340 if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
2341 (ufshpb_get_state(hpb) != HPB_SUSPEND))
2343 ufshpb_set_state(hpb, HPB_PRESENT);
2344 ufshpb_kick_map_work(hpb);
2347 hpb->params.timeout_polling_interval_ms;
2349 schedule_delayed_work(&hpb->ufshpb_read_to_work,
2350 msecs_to_jiffies(poll));
2355 static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
2356 struct ufshpb_lu_info *hpb_lu_info)
2358 u16 max_active_rgns;
2362 char desc_buf[QUERY_DESC_MAX_SIZE];
2364 ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
2366 ufshcd_rpm_get_sync(hba);
2367 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2368 QUERY_DESC_IDN_UNIT, lun, 0,
2370 ufshcd_rpm_put_sync(hba);
2374 "%s: idn: %d lun: %d query request failed",
2375 __func__, QUERY_DESC_IDN_UNIT, lun);
2379 lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
2380 if (lu_enable != LU_ENABLED_HPB_FUNC)
2383 max_active_rgns = get_unaligned_be16(
2384 desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
2385 if (!max_active_rgns) {
2387 "lun %d wrong number of max active regions\n", lun);
2391 hpb_lu_info->num_blocks = get_unaligned_be64(
2392 desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
2393 hpb_lu_info->pinned_start = get_unaligned_be16(
2394 desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
2395 hpb_lu_info->num_pinned = get_unaligned_be16(
2396 desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
2397 hpb_lu_info->max_active_rgns = max_active_rgns;
2402 void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2404 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2409 ufshpb_set_state(hpb, HPB_FAILED);
2411 sdev = hpb->sdev_ufs_lu;
2412 sdev->hostdata = NULL;
2414 ufshpb_cancel_jobs(hpb);
2416 ufshpb_pre_req_mempool_destroy(hpb);
2417 ufshpb_destroy_region_tbl(hpb);
2419 kmem_cache_destroy(hpb->map_req_cache);
2420 kmem_cache_destroy(hpb->m_page_cache);
2422 list_del_init(&hpb->list_hpb_lu);
2427 static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
2430 struct ufshpb_lu *hpb;
2431 struct scsi_device *sdev;
2434 if (tot_active_srgn_pages == 0) {
2439 init_success = !ufshpb_check_hpb_reset_query(hba);
2441 pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2442 if (pool_size > tot_active_srgn_pages) {
2443 mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
2444 mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
2447 shost_for_each_device(sdev, hba->host) {
2448 hpb = ufshpb_get_hpb_data(sdev);
2453 ufshpb_set_state(hpb, HPB_PRESENT);
2454 if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
2455 queue_work(ufshpb_wq, &hpb->map_work);
2457 ufshpb_issue_umap_all_req(hpb);
2459 dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
2460 ufshpb_destroy_lu(hba, sdev);
2468 void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2470 struct ufshpb_lu *hpb;
2472 struct ufshpb_lu_info hpb_lu_info = { 0 };
2473 int lun = sdev->lun;
2475 if (lun >= hba->dev_info.max_lu_supported)
2478 ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
2482 hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
2487 tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
2488 hpb->srgns_per_rgn * hpb->pages_per_srgn;
2491 /* All LUs are initialized */
2492 if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
2493 ufshpb_hpb_lu_prepared(hba);
2496 static int ufshpb_init_mem_wq(struct ufs_hba *hba)
2499 unsigned int pool_size;
2501 ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
2502 sizeof(struct ufshpb_map_ctx),
2504 if (!ufshpb_mctx_cache) {
2505 dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
2509 pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2510 dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
2511 __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
2513 ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
2515 if (!ufshpb_mctx_pool) {
2516 dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
2518 goto release_mctx_cache;
2521 ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
2522 if (!ufshpb_page_pool) {
2523 dev_err(hba->dev, "ufshpb: cannot init page pool\n");
2525 goto release_mctx_pool;
2528 ufshpb_wq = alloc_workqueue("ufshpb-wq",
2529 WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2531 dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
2533 goto release_page_pool;
2539 mempool_destroy(ufshpb_page_pool);
2541 mempool_destroy(ufshpb_mctx_pool);
2543 kmem_cache_destroy(ufshpb_mctx_cache);
2547 void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
2549 struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
2550 int max_active_rgns = 0;
2553 hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
2554 if (hpb_num_lu == 0) {
2555 dev_err(hba->dev, "No HPB LU supported\n");
2556 hpb_info->hpb_disabled = true;
2560 hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
2561 hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
2562 max_active_rgns = get_unaligned_be16(geo_buf +
2563 GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
2565 if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
2566 max_active_rgns == 0) {
2567 dev_err(hba->dev, "No HPB supported device\n");
2568 hpb_info->hpb_disabled = true;
2573 void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
2575 struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2579 hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
2581 version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
2582 if ((version != HPB_SUPPORT_VERSION) &&
2583 (version != HPB_SUPPORT_LEGACY_VERSION)) {
2584 dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
2586 hpb_dev_info->hpb_disabled = true;
2590 if (version == HPB_SUPPORT_LEGACY_VERSION)
2591 hpb_dev_info->is_legacy = true;
2594 * Get the number of user logical units to check whether all
2595 * scsi_devices have finished initialization.
2597 hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
2599 if (hpb_dev_info->is_legacy)
2602 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2603 QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
2606 hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
2608 hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
2611 void ufshpb_init(struct ufs_hba *hba)
2613 struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2617 if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
2620 if (ufshpb_init_mem_wq(hba)) {
2621 hpb_dev_info->hpb_disabled = true;
2625 atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
2626 tot_active_srgn_pages = 0;
2627 /* issue HPB reset query */
2628 for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2629 ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2630 QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
2636 void ufshpb_remove(struct ufs_hba *hba)
2638 mempool_destroy(ufshpb_page_pool);
2639 mempool_destroy(ufshpb_mctx_pool);
2640 kmem_cache_destroy(ufshpb_mctx_cache);
2642 destroy_workqueue(ufshpb_wq);
2645 module_param(ufshpb_host_map_kbytes, uint, 0644);
2646 MODULE_PARM_DESC(ufshpb_host_map_kbytes,
2647 "ufshpb host mapping memory kilo-bytes for ufshpb memory-pool");