GNU Linux-libre 5.15.54-gnu
drivers/scsi/ufs/ufshpb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Universal Flash Storage Host Performance Booster
4  *
5  * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
6  *
7  * Authors:
8  *      Yongmyung Lee <ymhungry.lee@samsung.com>
9  *      Jinyoung Choi <j-young.choi@samsung.com>
10  */
11
12 #include <asm/unaligned.h>
13 #include <linux/async.h>
14
15 #include "ufshcd.h"
16 #include "ufshpb.h"
17 #include "../sd.h"
18
19 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
20 #define READ_TO_MS 1000
21 #define READ_TO_EXPIRIES 100
22 #define POLLING_INTERVAL_MS 200
23 #define THROTTLE_MAP_REQ_DEFAULT 1
24
25 /* memory management */
26 static struct kmem_cache *ufshpb_mctx_cache;
27 static mempool_t *ufshpb_mctx_pool;
28 static mempool_t *ufshpb_page_pool;
29 /* A cache size of 2MB can hold ppn entries covering a 1GB range. */
30 static unsigned int ufshpb_host_map_kbytes = 2048;
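/*
 * For illustration, assuming an 8-byte HPB entry and a 4KB entry block
 * size: a 2048KB map cache holds 2048 * 1024 / 8 = 262144 L2P entries,
 * which cover 262144 * 4KB = 1GB of logical address space.
 */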
31 static int tot_active_srgn_pages;
32
33 static struct workqueue_struct *ufshpb_wq;
34
35 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
36                                       int srgn_idx);
37
38 bool ufshpb_is_allowed(struct ufs_hba *hba)
39 {
40         return !(hba->ufshpb_dev.hpb_disabled);
41 }
42
43 /* HPB version 1.0 is called the legacy version. */
44 bool ufshpb_is_legacy(struct ufs_hba *hba)
45 {
46         return hba->ufshpb_dev.is_legacy;
47 }
48
49 static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
50 {
51         return sdev->hostdata;
52 }
53
54 static int ufshpb_get_state(struct ufshpb_lu *hpb)
55 {
56         return atomic_read(&hpb->hpb_state);
57 }
58
59 static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
60 {
61         atomic_set(&hpb->hpb_state, state);
62 }
63
64 static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
65                                 struct ufshpb_subregion *srgn)
66 {
67         return rgn->rgn_state != HPB_RGN_INACTIVE &&
68                 srgn->srgn_state == HPB_SRGN_VALID;
69 }
70
71 static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
72 {
73         return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
74 }
75
76 static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
77 {
78         return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
79                op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
80 }
81
82 static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
83 {
84         return transfer_len <= hpb->pre_req_max_tr_len;
85 }
86
87 static bool ufshpb_is_general_lun(int lun)
88 {
89         return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
90 }
91
92 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
93 {
94         if (hpb->lu_pinned_end != PINNED_NOT_SET &&
95             rgn_idx >= hpb->lu_pinned_start &&
96             rgn_idx <= hpb->lu_pinned_end)
97                 return true;
98
99         return false;
100 }
101
102 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
103 {
104         bool ret = false;
105         unsigned long flags;
106
107         if (ufshpb_get_state(hpb) != HPB_PRESENT)
108                 return;
109
110         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
111         if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
112                 ret = true;
113         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
114
115         if (ret)
116                 queue_work(ufshpb_wq, &hpb->map_work);
117 }
118
119 static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
120                                     struct ufshcd_lrb *lrbp,
121                                     struct utp_hpb_rsp *rsp_field)
122 {
123         /* Check HPB_UPDATE_ALERT */
124         if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
125               UPIU_HEADER_DWORD(0, 2, 0, 0)))
126                 return false;
127
128         if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
129             rsp_field->desc_type != DEV_DES_TYPE ||
130             rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
131             rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
132             rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
133             rsp_field->hpb_op == HPB_RSP_NONE ||
134             (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
135              !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
136                 return false;
137
138         if (!ufshpb_is_general_lun(rsp_field->lun)) {
139                 dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
140                          lrbp->lun);
141                 return false;
142         }
143
144         return true;
145 }
146
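/*
 * Walk every subregion touched by an I/O of @cnt L2P entries starting at
 * (@rgn_idx, @srgn_idx, @srgn_offset). For writes and discards (@set_dirty)
 * the covered entries are marked dirty; in host control mode, reads rewind
 * the region read timer, bump the per-subregion read counters and queue an
 * activation request once the activation threshold is reached.
 */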
147 static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
148                                int srgn_offset, int cnt, bool set_dirty)
149 {
150         struct ufshpb_region *rgn;
151         struct ufshpb_subregion *srgn, *prev_srgn = NULL;
152         int set_bit_len;
153         int bitmap_len;
154         unsigned long flags;
155
156 next_srgn:
157         rgn = hpb->rgn_tbl + rgn_idx;
158         srgn = rgn->srgn_tbl + srgn_idx;
159
160         if (likely(!srgn->is_last))
161                 bitmap_len = hpb->entries_per_srgn;
162         else
163                 bitmap_len = hpb->last_srgn_entries;
164
165         if ((srgn_offset + cnt) > bitmap_len)
166                 set_bit_len = bitmap_len - srgn_offset;
167         else
168                 set_bit_len = cnt;
169
170         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
171         if (rgn->rgn_state != HPB_RGN_INACTIVE) {
172                 if (set_dirty) {
173                         if (srgn->srgn_state == HPB_SRGN_VALID)
174                                 bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
175                                            set_bit_len);
176                 } else if (hpb->is_hcm) {
177                          /* rewind the read timer for lru regions */
178                         rgn->read_timeout = ktime_add_ms(ktime_get(),
179                                         rgn->hpb->params.read_timeout_ms);
180                         rgn->read_timeout_expiries =
181                                 rgn->hpb->params.read_timeout_expiries;
182                 }
183         }
184         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
185
186         if (hpb->is_hcm && prev_srgn != srgn) {
187                 bool activate = false;
188
189                 spin_lock(&rgn->rgn_lock);
190                 if (set_dirty) {
191                         rgn->reads -= srgn->reads;
192                         srgn->reads = 0;
193                         set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
194                 } else {
195                         srgn->reads++;
196                         rgn->reads++;
197                         if (srgn->reads == hpb->params.activation_thld)
198                                 activate = true;
199                 }
200                 spin_unlock(&rgn->rgn_lock);
201
202                 if (activate ||
203                     test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
204                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
205                         ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
206                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
207                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
208                                 "activate region %d-%d\n", rgn_idx, srgn_idx);
209                 }
210
211                 prev_srgn = srgn;
212         }
213
214         srgn_offset = 0;
215         if (++srgn_idx == hpb->srgns_per_rgn) {
216                 srgn_idx = 0;
217                 rgn_idx++;
218         }
219
220         cnt -= set_bit_len;
221         if (cnt > 0)
222                 goto next_srgn;
223 }
224
225 static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
226                                   int srgn_idx, int srgn_offset, int cnt)
227 {
228         struct ufshpb_region *rgn;
229         struct ufshpb_subregion *srgn;
230         int bitmap_len;
231         int bit_len;
232
233 next_srgn:
234         rgn = hpb->rgn_tbl + rgn_idx;
235         srgn = rgn->srgn_tbl + srgn_idx;
236
237         if (likely(!srgn->is_last))
238                 bitmap_len = hpb->entries_per_srgn;
239         else
240                 bitmap_len = hpb->last_srgn_entries;
241
242         if (!ufshpb_is_valid_srgn(rgn, srgn))
243                 return true;
244
245         /*
246          * If the region state is active, mctx must be allocated.
247          * In this case, check whether the region has been evicted or
248          * the mctx allocation failed.
249          */
250         if (unlikely(!srgn->mctx)) {
251                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
252                         "no mctx in region %d subregion %d.\n",
253                         srgn->rgn_idx, srgn->srgn_idx);
254                 return true;
255         }
256
257         if ((srgn_offset + cnt) > bitmap_len)
258                 bit_len = bitmap_len - srgn_offset;
259         else
260                 bit_len = cnt;
261
262         if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
263                           srgn_offset) < bit_len + srgn_offset)
264                 return true;
265
266         srgn_offset = 0;
267         if (++srgn_idx == hpb->srgns_per_rgn) {
268                 srgn_idx = 0;
269                 rgn_idx++;
270         }
271
272         cnt -= bit_len;
273         if (cnt > 0)
274                 goto next_srgn;
275
276         return false;
277 }
278
279 static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
280 {
281         return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
282 }
283
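/*
 * Illustrative example, assuming 4KB pages and 8-byte HPB entries: each
 * m_page holds 512 entries, so pos = 1000 lands in m_page[1] (1000 / 512)
 * at entry offset 488 (1000 % 512). The return value is the number of
 * entries actually copied from that single page, which may be less than
 * @len when the requested range crosses a page boundary.
 */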
284 static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
285                                      struct ufshpb_map_ctx *mctx, int pos,
286                                      int len, __be64 *ppn_buf)
287 {
288         struct page *page;
289         int index, offset;
290         int copied;
291
292         index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
293         offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
294
295         if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
296                 copied = len;
297         else
298                 copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
299
300         page = mctx->m_page[index];
301         if (unlikely(!page)) {
302                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
303                         "error. cannot find page in mctx\n");
304                 return -ENOMEM;
305         }
306
307         memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
308                copied * HPB_ENTRY_SIZE);
309
310         return copied;
311 }
312
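/*
 * Illustrative example with hypothetical geometry (entries_per_rgn = 4096,
 * entries_per_srgn = 512): lpn 10000 yields rgn_idx = 10000 >> 12 = 2,
 * rgn_offset = 10000 & 4095 = 1808, srgn_idx = 1808 >> 9 = 3 and
 * offset = 1808 & 511 = 272.
 */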
313 static void
314 ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
315                         int *srgn_idx, int *offset)
316 {
317         int rgn_offset;
318
319         *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
320         rgn_offset = lpn & hpb->entries_per_rgn_mask;
321         *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
322         *offset = rgn_offset & hpb->entries_per_srgn_mask;
323 }
324
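/*
 * Sketch of the CDB bytes rewritten below (the LBA bytes filled in by the
 * original READ command are left untouched): byte 0 becomes the UFSHPB_READ
 * opcode, bytes 6-13 carry the 8-byte PPN in big-endian form (byte-swapped
 * first on devices with the SWAP_L2P_ENTRY quirk), byte 14 is the transfer
 * length and byte 15 is cleared.
 */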
325 static void
326 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
327                             __be64 ppn, u8 transfer_len)
328 {
329         unsigned char *cdb = lrbp->cmd->cmnd;
330         __be64 ppn_tmp = ppn;
331         cdb[0] = UFSHPB_READ;
332
333         if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
334                 ppn_tmp = swab64(ppn);
335
336         /* ppn value is stored as big-endian in the host memory */
337         memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
338         cdb[14] = transfer_len;
339         cdb[15] = 0;
340
341         lrbp->cmd->cmd_len = UFS_CDB_SIZE;
342 }
343
344 /*
345  * This function sets up an HPB READ command using host-side L2P map data.
346  */
347 int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
348 {
349         struct ufshpb_lu *hpb;
350         struct ufshpb_region *rgn;
351         struct ufshpb_subregion *srgn;
352         struct scsi_cmnd *cmd = lrbp->cmd;
353         u32 lpn;
354         __be64 ppn;
355         unsigned long flags;
356         int transfer_len, rgn_idx, srgn_idx, srgn_offset;
357         int err = 0;
358
359         hpb = ufshpb_get_hpb_data(cmd->device);
360         if (!hpb)
361                 return -ENODEV;
362
363         if (ufshpb_get_state(hpb) == HPB_INIT)
364                 return -ENODEV;
365
366         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
367                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
368                            "%s: ufshpb state is not PRESENT", __func__);
369                 return -ENODEV;
370         }
371
372         if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
373             (!ufshpb_is_write_or_discard(cmd) &&
374              !ufshpb_is_read_cmd(cmd)))
375                 return 0;
376
377         transfer_len = sectors_to_logical(cmd->device,
378                                           blk_rq_sectors(scsi_cmd_to_rq(cmd)));
379         if (unlikely(!transfer_len))
380                 return 0;
381
382         lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
383         ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
384         rgn = hpb->rgn_tbl + rgn_idx;
385         srgn = rgn->srgn_tbl + srgn_idx;
386
387         /* If the command is WRITE or DISCARD, mark the bitmap as dirty */
388         if (ufshpb_is_write_or_discard(cmd)) {
389                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
390                                    transfer_len, true);
391                 return 0;
392         }
393
394         if (!ufshpb_is_supported_chunk(hpb, transfer_len))
395                 return 0;
396
397         if (hpb->is_hcm) {
398                 /*
399                  * in host control mode, reads are the main source for
400                  * activation trials.
401                  */
402                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
403                                    transfer_len, false);
404
405                 /* keep those counters normalized */
406                 if (rgn->reads > hpb->entries_per_srgn)
407                         schedule_work(&hpb->ufshpb_normalization_work);
408         }
409
410         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
411         if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
412                                    transfer_len)) {
413                 hpb->stats.miss_cnt++;
414                 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
415                 return 0;
416         }
417
418         err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
419         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
420         if (unlikely(err < 0)) {
421                 /*
422                  * In this case, the region state is active,
423                  * but the ppn table is not allocated.
424                  * The ppn table must always be allocated while the
425                  * region is in the active state.
426                  */
427                 dev_err(hba->dev, "get ppn failed. err %d\n", err);
428                 return err;
429         }
430
431         ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
432
433         hpb->stats.hit_cnt++;
434         return 0;
435 }
436
437 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
438                                          int rgn_idx, enum req_opf dir,
439                                          bool atomic)
440 {
441         struct ufshpb_req *rq;
442         struct request *req;
443         int retries = HPB_MAP_REQ_RETRIES;
444
445         rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
446         if (!rq)
447                 return NULL;
448
449 retry:
450         req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
451                               BLK_MQ_REQ_NOWAIT);
452
453         if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
454                 usleep_range(3000, 3100);
455                 goto retry;
456         }
457
458         if (IS_ERR(req))
459                 goto free_rq;
460
461         rq->hpb = hpb;
462         rq->req = req;
463         rq->rb.rgn_idx = rgn_idx;
464
465         return rq;
466
467 free_rq:
468         kmem_cache_free(hpb->map_req_cache, rq);
469         return NULL;
470 }
471
472 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
473 {
474         blk_put_request(rq->req);
475         kmem_cache_free(hpb->map_req_cache, rq);
476 }
477
478 static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
479                                              struct ufshpb_subregion *srgn)
480 {
481         struct ufshpb_req *map_req;
482         struct bio *bio;
483         unsigned long flags;
484
485         if (hpb->is_hcm &&
486             hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
487                 dev_info(&hpb->sdev_ufs_lu->sdev_dev,
488                          "map_req throttle. inflight %d throttle %d",
489                          hpb->num_inflight_map_req,
490                          hpb->params.inflight_map_req);
491                 return NULL;
492         }
493
494         map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
495         if (!map_req)
496                 return NULL;
497
498         bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
499         if (!bio) {
500                 ufshpb_put_req(hpb, map_req);
501                 return NULL;
502         }
503
504         map_req->bio = bio;
505
506         map_req->rb.srgn_idx = srgn->srgn_idx;
507         map_req->rb.mctx = srgn->mctx;
508
509         spin_lock_irqsave(&hpb->param_lock, flags);
510         hpb->num_inflight_map_req++;
511         spin_unlock_irqrestore(&hpb->param_lock, flags);
512
513         return map_req;
514 }
515
516 static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
517                                struct ufshpb_req *map_req)
518 {
519         unsigned long flags;
520
521         bio_put(map_req->bio);
522         ufshpb_put_req(hpb, map_req);
523
524         spin_lock_irqsave(&hpb->param_lock, flags);
525         hpb->num_inflight_map_req--;
526         spin_unlock_irqrestore(&hpb->param_lock, flags);
527 }
528
529 static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
530                                      struct ufshpb_subregion *srgn)
531 {
532         struct ufshpb_region *rgn;
533         u32 num_entries = hpb->entries_per_srgn;
534
535         if (!srgn->mctx) {
536                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
537                         "no mctx in region %d subregion %d.\n",
538                         srgn->rgn_idx, srgn->srgn_idx);
539                 return -1;
540         }
541
542         if (unlikely(srgn->is_last))
543                 num_entries = hpb->last_srgn_entries;
544
545         bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
546
547         rgn = hpb->rgn_tbl + srgn->rgn_idx;
548         clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
549
550         return 0;
551 }
552
553 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
554                                       int srgn_idx)
555 {
556         struct ufshpb_region *rgn;
557         struct ufshpb_subregion *srgn;
558
559         rgn = hpb->rgn_tbl + rgn_idx;
560         srgn = rgn->srgn_tbl + srgn_idx;
561
562         list_del_init(&rgn->list_inact_rgn);
563
564         if (list_empty(&srgn->list_act_srgn))
565                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
566
567         hpb->stats.rb_active_cnt++;
568 }
569
570 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
571 {
572         struct ufshpb_region *rgn;
573         struct ufshpb_subregion *srgn;
574         int srgn_idx;
575
576         rgn = hpb->rgn_tbl + rgn_idx;
577
578         for_each_sub_region(rgn, srgn_idx, srgn)
579                 list_del_init(&srgn->list_act_srgn);
580
581         if (list_empty(&rgn->list_inact_rgn))
582                 list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
583
584         hpb->stats.rb_inactive_cnt++;
585 }
586
587 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
588                                       struct ufshpb_subregion *srgn)
589 {
590         struct ufshpb_region *rgn;
591
592         /*
593          * If the subregion has no mctx after the HPB_READ_BUFFER I/O
594          * has completed, the region to which the subregion belongs
595          * was evicted.
596          * Make sure the region is not evicted while I/O is in progress.
597          */
598         if (!srgn->mctx) {
599                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
600                         "no mctx in region %d subregion %d.\n",
601                         srgn->rgn_idx, srgn->srgn_idx);
602                 srgn->srgn_state = HPB_SRGN_INVALID;
603                 return;
604         }
605
606         rgn = hpb->rgn_tbl + srgn->rgn_idx;
607
608         if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
609                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
610                         "region %d subregion %d evicted\n",
611                         srgn->rgn_idx, srgn->srgn_idx);
612                 srgn->srgn_state = HPB_SRGN_INVALID;
613                 return;
614         }
615         srgn->srgn_state = HPB_SRGN_VALID;
616 }
617
618 static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
619 {
620         struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
621
622         ufshpb_put_req(umap_req->hpb, umap_req);
623 }
624
625 static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
626 {
627         struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
628         struct ufshpb_lu *hpb = map_req->hpb;
629         struct ufshpb_subregion *srgn;
630         unsigned long flags;
631
632         srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
633                 map_req->rb.srgn_idx;
634
635         ufshpb_clear_dirty_bitmap(hpb, srgn);
636         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
637         ufshpb_activate_subregion(hpb, srgn);
638         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
639
640         ufshpb_put_map_req(map_req->hpb, map_req);
641 }
642
643 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
644 {
645         cdb[0] = UFSHPB_WRITE_BUFFER;
646         cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
647                           UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
648         if (rgn)
649                 put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
650         cdb[9] = 0x00;
651 }
652
653 static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
654                                     int srgn_idx, int srgn_mem_size)
655 {
656         cdb[0] = UFSHPB_READ_BUFFER;
657         cdb[1] = UFSHPB_READ_BUFFER_ID;
658
659         put_unaligned_be16(rgn_idx, &cdb[2]);
660         put_unaligned_be16(srgn_idx, &cdb[4]);
661         put_unaligned_be24(srgn_mem_size, &cdb[6]);
662
663         cdb[9] = 0x00;
664 }
665
666 static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
667                                    struct ufshpb_req *umap_req,
668                                    struct ufshpb_region *rgn)
669 {
670         struct request *req;
671         struct scsi_request *rq;
672
673         req = umap_req->req;
674         req->timeout = 0;
675         req->end_io_data = (void *)umap_req;
676         rq = scsi_req(req);
677         ufshpb_set_unmap_cmd(rq->cmd, rgn);
678         rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
679
680         blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
681
682         hpb->stats.umap_req_cnt++;
683 }
684
685 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
686                                   struct ufshpb_req *map_req, bool last)
687 {
688         struct request_queue *q;
689         struct request *req;
690         struct scsi_request *rq;
691         int mem_size = hpb->srgn_mem_size;
692         int ret = 0;
693         int i;
694
695         q = hpb->sdev_ufs_lu->request_queue;
696         for (i = 0; i < hpb->pages_per_srgn; i++) {
697                 ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
698                                       PAGE_SIZE, 0);
699                 if (ret != PAGE_SIZE) {
700                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
701                                    "bio_add_pc_page fail %d - %d\n",
702                                    map_req->rb.rgn_idx, map_req->rb.srgn_idx);
703                         return ret;
704                 }
705         }
706
707         req = map_req->req;
708
709         blk_rq_append_bio(req, map_req->bio);
710
711         req->end_io_data = map_req;
712
713         rq = scsi_req(req);
714
715         if (unlikely(last))
716                 mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
717
718         ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
719                                 map_req->rb.srgn_idx, mem_size);
720         rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
721
722         blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn);
723
724         hpb->stats.map_req_cnt++;
725         return 0;
726 }
727
728 static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
729                                                  bool last)
730 {
731         struct ufshpb_map_ctx *mctx;
732         u32 num_entries = hpb->entries_per_srgn;
733         int i, j;
734
735         mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
736         if (!mctx)
737                 return NULL;
738
739         mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
740         if (!mctx->m_page)
741                 goto release_mctx;
742
743         if (unlikely(last))
744                 num_entries = hpb->last_srgn_entries;
745
746         mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
747         if (!mctx->ppn_dirty)
748                 goto release_m_page;
749
750         for (i = 0; i < hpb->pages_per_srgn; i++) {
751                 mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
752                 if (!mctx->m_page[i]) {
753                         for (j = 0; j < i; j++)
754                                 mempool_free(mctx->m_page[j], ufshpb_page_pool);
755                         goto release_ppn_dirty;
756                 }
757                 clear_page(page_address(mctx->m_page[i]));
758         }
759
760         return mctx;
761
762 release_ppn_dirty:
763         bitmap_free(mctx->ppn_dirty);
764 release_m_page:
765         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
766 release_mctx:
767         mempool_free(mctx, ufshpb_mctx_pool);
768         return NULL;
769 }
770
771 static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
772                                struct ufshpb_map_ctx *mctx)
773 {
774         int i;
775
776         for (i = 0; i < hpb->pages_per_srgn; i++)
777                 mempool_free(mctx->m_page[i], ufshpb_page_pool);
778
779         bitmap_free(mctx->ppn_dirty);
780         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
781         mempool_free(mctx, ufshpb_mctx_pool);
782 }
783
784 static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
785                                           struct ufshpb_region *rgn)
786 {
787         struct ufshpb_subregion *srgn;
788         int srgn_idx;
789
790         for_each_sub_region(rgn, srgn_idx, srgn)
791                 if (srgn->srgn_state == HPB_SRGN_ISSUED)
792                         return -EPERM;
793
794         return 0;
795 }
796
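/*
 * Periodic worker used in host control mode: active regions whose read
 * timer has expired either get a fresh timeout or, once dirty or out of
 * expiry credits, are queued for inactivation; the map work is then kicked
 * and the worker is rescheduled.
 */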
797 static void ufshpb_read_to_handler(struct work_struct *work)
798 {
799         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
800                                              ufshpb_read_to_work.work);
801         struct victim_select_info *lru_info = &hpb->lru_info;
802         struct ufshpb_region *rgn, *next_rgn;
803         unsigned long flags;
804         unsigned int poll;
805         LIST_HEAD(expired_list);
806
807         if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
808                 return;
809
810         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
811
812         list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
813                                  list_lru_rgn) {
814                 bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
815
816                 if (timedout) {
817                         rgn->read_timeout_expiries--;
818                         if (is_rgn_dirty(rgn) ||
819                             rgn->read_timeout_expiries == 0)
820                                 list_add(&rgn->list_expired_rgn, &expired_list);
821                         else
822                                 rgn->read_timeout = ktime_add_ms(ktime_get(),
823                                                 hpb->params.read_timeout_ms);
824                 }
825         }
826
827         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
828
829         list_for_each_entry_safe(rgn, next_rgn, &expired_list,
830                                  list_expired_rgn) {
831                 list_del_init(&rgn->list_expired_rgn);
832                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
833                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
834                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
835         }
836
837         ufshpb_kick_map_work(hpb);
838
839         clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
840
841         poll = hpb->params.timeout_polling_interval_ms;
842         schedule_delayed_work(&hpb->ufshpb_read_to_work,
843                               msecs_to_jiffies(poll));
844 }
845
846 static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
847                                 struct ufshpb_region *rgn)
848 {
849         rgn->rgn_state = HPB_RGN_ACTIVE;
850         list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
851         atomic_inc(&lru_info->active_cnt);
852         if (rgn->hpb->is_hcm) {
853                 rgn->read_timeout =
854                         ktime_add_ms(ktime_get(),
855                                      rgn->hpb->params.read_timeout_ms);
856                 rgn->read_timeout_expiries =
857                         rgn->hpb->params.read_timeout_expiries;
858         }
859 }
860
861 static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
862                                 struct ufshpb_region *rgn)
863 {
864         list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
865 }
866
867 static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
868 {
869         struct victim_select_info *lru_info = &hpb->lru_info;
870         struct ufshpb_region *rgn, *victim_rgn = NULL;
871
872         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
873                 if (ufshpb_check_srgns_issue_state(hpb, rgn))
874                         continue;
875
876                 /*
877                  * In host control mode, verify that the exiting region
878                  * has fewer reads than the eviction-exit threshold.
879                  */
880                 if (hpb->is_hcm &&
881                     rgn->reads > hpb->params.eviction_thld_exit)
882                         continue;
883
884                 victim_rgn = rgn;
885                 break;
886         }
887
888         if (!victim_rgn)
889                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
890                         "%s: no region allocated\n",
891                         __func__);
892
893         return victim_rgn;
894 }
895
896 static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
897                                     struct ufshpb_region *rgn)
898 {
899         list_del_init(&rgn->list_lru_rgn);
900         rgn->rgn_state = HPB_RGN_INACTIVE;
901         atomic_dec(&lru_info->active_cnt);
902 }
903
904 static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
905                                           struct ufshpb_subregion *srgn)
906 {
907         if (srgn->srgn_state != HPB_SRGN_UNUSED) {
908                 ufshpb_put_map_ctx(hpb, srgn->mctx);
909                 srgn->srgn_state = HPB_SRGN_UNUSED;
910                 srgn->mctx = NULL;
911         }
912 }
913
914 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
915                                  struct ufshpb_region *rgn,
916                                  bool atomic)
917 {
918         struct ufshpb_req *umap_req;
919         int rgn_idx = rgn ? rgn->rgn_idx : 0;
920
921         umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
922         if (!umap_req)
923                 return -ENOMEM;
924
925         ufshpb_execute_umap_req(hpb, umap_req, rgn);
926
927         return 0;
928 }
929
930 static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
931                                         struct ufshpb_region *rgn)
932 {
933         return ufshpb_issue_umap_req(hpb, rgn, true);
934 }
935
936 static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
937 {
938         return ufshpb_issue_umap_req(hpb, NULL, false);
939 }
940
941 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
942                                  struct ufshpb_region *rgn)
943 {
944         struct victim_select_info *lru_info;
945         struct ufshpb_subregion *srgn;
946         int srgn_idx;
947
948         lru_info = &hpb->lru_info;
949
950         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
951
952         ufshpb_cleanup_lru_info(lru_info, rgn);
953
954         for_each_sub_region(rgn, srgn_idx, srgn)
955                 ufshpb_purge_active_subregion(hpb, srgn);
956 }
957
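/*
 * Try to evict @rgn from the active LRU list: pinned regions are never
 * dropped, a region with an in-flight map request returns -EBUSY, and in
 * host control mode a single-region HPB WRITE BUFFER (unmap) is issued to
 * the device before the host-side state is cleaned up.
 */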
958 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
959 {
960         unsigned long flags;
961         int ret = 0;
962
963         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
964         if (rgn->rgn_state == HPB_RGN_PINNED) {
965                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
966                          "pinned region cannot drop-out. region %d\n",
967                          rgn->rgn_idx);
968                 goto out;
969         }
970
971         if (!list_empty(&rgn->list_lru_rgn)) {
972                 if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
973                         ret = -EBUSY;
974                         goto out;
975                 }
976
977                 if (hpb->is_hcm) {
978                         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
979                         ret = ufshpb_issue_umap_single_req(hpb, rgn);
980                         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
981                         if (ret)
982                                 goto out;
983                 }
984
985                 __ufshpb_evict_region(hpb, rgn);
986         }
987 out:
988         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
989         return ret;
990 }
991
992 static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
993                                 struct ufshpb_region *rgn,
994                                 struct ufshpb_subregion *srgn)
995 {
996         struct ufshpb_req *map_req;
997         unsigned long flags;
998         int ret;
999         int err = -EAGAIN;
1000         bool alloc_required = false;
1001         enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
1002
1003         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1004
1005         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1006                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1007                            "%s: ufshpb state is not PRESENT\n", __func__);
1008                 goto unlock_out;
1009         }
1010
1011         if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
1012             (srgn->srgn_state == HPB_SRGN_INVALID)) {
1013                 err = 0;
1014                 goto unlock_out;
1015         }
1016
1017         if (srgn->srgn_state == HPB_SRGN_UNUSED)
1018                 alloc_required = true;
1019
1020         /*
1021          * If the subregion is already in the ISSUED state, a device-side
1022          * event (e.g. GC or wear-leveling) has occurred and an HPB
1023          * response requesting map loading has been received.
1024          * In this case, after the current HPB_READ_BUFFER finishes,
1025          * another HPB_READ_BUFFER is issued to obtain the latest
1026          * map data.
1027          */
1028         if (srgn->srgn_state == HPB_SRGN_ISSUED)
1029                 goto unlock_out;
1030
1031         srgn->srgn_state = HPB_SRGN_ISSUED;
1032         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1033
1034         if (alloc_required) {
1035                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1036                 if (!srgn->mctx) {
1037                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1038                             "get map_ctx failed. region %d - %d\n",
1039                             rgn->rgn_idx, srgn->srgn_idx);
1040                         state = HPB_SRGN_UNUSED;
1041                         goto change_srgn_state;
1042                 }
1043         }
1044
1045         map_req = ufshpb_get_map_req(hpb, srgn);
1046         if (!map_req)
1047                 goto change_srgn_state;
1048
1049
1050         ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
1051         if (ret) {
1052                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1053                            "%s: issue map_req failed: %d, region %d - %d\n",
1054                            __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
1055                 goto free_map_req;
1056         }
1057         return 0;
1058
1059 free_map_req:
1060         ufshpb_put_map_req(hpb, map_req);
1061 change_srgn_state:
1062         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1063         srgn->srgn_state = state;
1064 unlock_out:
1065         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1066         return err;
1067 }
1068
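/*
 * Make @rgn active: a region already on the LRU list is simply moved to the
 * MRU position; otherwise, if the active-region limit has been reached, an
 * LRU victim is evicted first (in host control mode only when the entering
 * region has enough reads and the victim has few enough).
 */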
1069 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1070 {
1071         struct ufshpb_region *victim_rgn = NULL;
1072         struct victim_select_info *lru_info = &hpb->lru_info;
1073         unsigned long flags;
1074         int ret = 0;
1075
1076         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1077         /*
1078          * If the region already belongs to the LRU list, just move it
1079          * to the MRU position, because the region is already in the
1080          * active state.
1081          */
1082         if (!list_empty(&rgn->list_lru_rgn)) {
1083                 ufshpb_hit_lru_info(lru_info, rgn);
1084                 goto out;
1085         }
1086
1087         if (rgn->rgn_state == HPB_RGN_INACTIVE) {
1088                 if (atomic_read(&lru_info->active_cnt) ==
1089                     lru_info->max_lru_active_cnt) {
1090                         /*
1091                          * If the maximum number of active regions
1092                          * is exceeded, evict the least recently used region.
1093                          * This case may occur when the device responds
1094                          * to the eviction information late.
1095                          * It is okay to evict the least recently used region,
1096                          * because the host simply stops issuing HPB_READ
1097                          * for that region.
1098                          *
1099                          * In host control mode, also verify that the
1100                          * entering region has accumulated enough reads.
1101                          */
1102                         if (hpb->is_hcm &&
1103                             rgn->reads < hpb->params.eviction_thld_enter) {
1104                                 ret = -EACCES;
1105                                 goto out;
1106                         }
1107
1108                         victim_rgn = ufshpb_victim_lru_info(hpb);
1109                         if (!victim_rgn) {
1110                                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1111                                     "cannot get victim region %s\n",
1112                                     hpb->is_hcm ? "" : "error");
1113                                 ret = -ENOMEM;
1114                                 goto out;
1115                         }
1116
1117                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1118                                 "LRU full (%d), choose victim %d\n",
1119                                 atomic_read(&lru_info->active_cnt),
1120                                 victim_rgn->rgn_idx);
1121
1122                         if (hpb->is_hcm) {
1123                                 spin_unlock_irqrestore(&hpb->rgn_state_lock,
1124                                                        flags);
1125                                 ret = ufshpb_issue_umap_single_req(hpb,
1126                                                                 victim_rgn);
1127                                 spin_lock_irqsave(&hpb->rgn_state_lock,
1128                                                   flags);
1129                                 if (ret)
1130                                         goto out;
1131                         }
1132
1133                         __ufshpb_evict_region(hpb, victim_rgn);
1134                 }
1135
1136                 /*
1137                  * When a region is added to the lru_info list_head, it is
1138                  * guaranteed that all of its subregions have been assigned
1139                  * an mctx. If that fails, the mctx is requested again
1140                  * without the region being added to the lru_info list_head.
1141                  */
1142                 ufshpb_add_lru_info(lru_info, rgn);
1143         }
1144 out:
1145         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1146         return ret;
1147 }
1148
1149 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
1150                                          struct utp_hpb_rsp *rsp_field)
1151 {
1152         struct ufshpb_region *rgn;
1153         struct ufshpb_subregion *srgn;
1154         int i, rgn_i, srgn_i;
1155
1156         BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
1157         /*
1158          * If the active region and the inactive region are the same,
1159          * we will inactivate this region.
1160          * The device can detect this (the region was inactivated) and
1161          * will respond with the proper active region information.
1162          */
1163         for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
1164                 rgn_i =
1165                         be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
1166                 srgn_i =
1167                         be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
1168
1169                 rgn = hpb->rgn_tbl + rgn_i;
1170                 if (hpb->is_hcm &&
1171                     (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
1172                         /*
1173                          * In host control mode, subregion activation
1174                          * recommendations are only allowed for active regions.
1175                          * Also, ignore recommendations for dirty regions - the
1176                          * host makes decisions about those on its own.
1177                          */
1178                         continue;
1179                 }
1180
1181                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1182                         "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
1183
1184                 spin_lock(&hpb->rsp_list_lock);
1185                 ufshpb_update_active_info(hpb, rgn_i, srgn_i);
1186                 spin_unlock(&hpb->rsp_list_lock);
1187
1188                 srgn = rgn->srgn_tbl + srgn_i;
1189
1190                 /* blocking HPB_READ */
1191                 spin_lock(&hpb->rgn_state_lock);
1192                 if (srgn->srgn_state == HPB_SRGN_VALID)
1193                         srgn->srgn_state = HPB_SRGN_INVALID;
1194                 spin_unlock(&hpb->rgn_state_lock);
1195         }
1196
1197         if (hpb->is_hcm) {
1198                 /*
1199                  * In host control mode, the device is not allowed to
1200                  * inactivate regions.
1201                  */
1202                 goto out;
1203         }
1204
1205         for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
1206                 rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
1207                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1208                         "inactivate(%d) region %d\n", i, rgn_i);
1209
1210                 spin_lock(&hpb->rsp_list_lock);
1211                 ufshpb_update_inactive_info(hpb, rgn_i);
1212                 spin_unlock(&hpb->rsp_list_lock);
1213
1214                 rgn = hpb->rgn_tbl + rgn_i;
1215
1216                 spin_lock(&hpb->rgn_state_lock);
1217                 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1218                         for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
1219                                 srgn = rgn->srgn_tbl + srgn_i;
1220                                 if (srgn->srgn_state == HPB_SRGN_VALID)
1221                                         srgn->srgn_state = HPB_SRGN_INVALID;
1222                         }
1223                 }
1224                 spin_unlock(&hpb->rgn_state_lock);
1225
1226         }
1227
1228 out:
1229         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
1230                 rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
1231
1232         if (ufshpb_get_state(hpb) == HPB_PRESENT)
1233                 queue_work(ufshpb_wq, &hpb->map_work);
1234 }
1235
1236 static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
1237 {
1238         struct victim_select_info *lru_info = &hpb->lru_info;
1239         struct ufshpb_region *rgn;
1240         unsigned long flags;
1241
1242         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1243
1244         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1245                 set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
1246
1247         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1248 }
1249
1250 /*
1251  * This function parses the recommended active subregion information in the
1252  * sense data field of a response UPIU with SAM_STAT_GOOD status.
1253  */
1254 void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1255 {
1256         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1257         struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1258         int data_seg_len;
1259
1260         data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1261                 & MASK_RSP_UPIU_DATA_SEG_LEN;
1262
1263         /* If data segment length is zero, rsp_field is not valid */
1264         if (!data_seg_len)
1265                 return;
1266
1267         if (unlikely(lrbp->lun != rsp_field->lun)) {
1268                 struct scsi_device *sdev;
1269                 bool found = false;
1270
1271                 __shost_for_each_device(sdev, hba->host) {
1272                         hpb = ufshpb_get_hpb_data(sdev);
1273
1274                         if (!hpb)
1275                                 continue;
1276
1277                         if (rsp_field->lun == hpb->lun) {
1278                                 found = true;
1279                                 break;
1280                         }
1281                 }
1282
1283                 if (!found)
1284                         return;
1285         }
1286
1287         if (!hpb)
1288                 return;
1289
1290         if (ufshpb_get_state(hpb) == HPB_INIT)
1291                 return;
1292
1293         if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1294             (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1295                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1296                            "%s: ufshpb state is not PRESENT/SUSPEND\n",
1297                            __func__);
1298                 return;
1299         }
1300
1301         BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1302
1303         if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1304                 return;
1305
1306         hpb->stats.rb_noti_cnt++;
1307
1308         switch (rsp_field->hpb_op) {
1309         case HPB_RSP_REQ_REGION_UPDATE:
1310                 if (data_seg_len != DEV_DATA_SEG_LEN)
1311                         dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1312                                  "%s: data seg length is not same.\n",
1313                                  __func__);
1314                 ufshpb_rsp_req_region_update(hpb, rsp_field);
1315                 break;
1316         case HPB_RSP_DEV_RESET:
1317                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1318                          "UFS device lost HPB information during PM.\n");
1319
1320                 if (hpb->is_hcm) {
1321                         struct scsi_device *sdev;
1322
1323                         __shost_for_each_device(sdev, hba->host) {
1324                                 struct ufshpb_lu *h = sdev->hostdata;
1325
1326                                 if (h)
1327                                         ufshpb_dev_reset_handler(h);
1328                         }
1329                 }
1330
1331                 break;
1332         default:
1333                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1334                            "hpb_op is not available: %d\n",
1335                            rsp_field->hpb_op);
1336                 break;
1337         }
1338 }
1339
1340 static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1341                                    struct ufshpb_region *rgn,
1342                                    struct ufshpb_subregion *srgn)
1343 {
1344         if (!list_empty(&rgn->list_inact_rgn))
1345                 return;
1346
1347         if (!list_empty(&srgn->list_act_srgn)) {
1348                 list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1349                 return;
1350         }
1351
1352         list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1353 }
1354
1355 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1356                                           struct ufshpb_region *rgn,
1357                                           struct list_head *pending_list)
1358 {
1359         struct ufshpb_subregion *srgn;
1360         int srgn_idx;
1361
1362         if (!list_empty(&rgn->list_inact_rgn))
1363                 return;
1364
1365         for_each_sub_region(rgn, srgn_idx, srgn)
1366                 if (!list_empty(&srgn->list_act_srgn))
1367                         return;
1368
1369         list_add_tail(&rgn->list_inact_rgn, pending_list);
1370 }
1371
1372 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1373 {
1374         struct ufshpb_region *rgn;
1375         struct ufshpb_subregion *srgn;
1376         unsigned long flags;
1377         int ret = 0;
1378
1379         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1380         while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1381                                                 struct ufshpb_subregion,
1382                                                 list_act_srgn))) {
1383                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1384                         break;
1385
1386                 list_del_init(&srgn->list_act_srgn);
1387                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1388
1389                 rgn = hpb->rgn_tbl + srgn->rgn_idx;
1390                 ret = ufshpb_add_region(hpb, rgn);
1391                 if (ret)
1392                         goto active_failed;
1393
1394                 ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1395                 if (ret) {
1396                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1397                             "issue map_req failed. ret %d, region %d - %d\n",
1398                             ret, rgn->rgn_idx, srgn->srgn_idx);
1399                         goto active_failed;
1400                 }
1401                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1402         }
1403         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1404         return;
1405
1406 active_failed:
1407         dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1408                    rgn->rgn_idx, srgn->srgn_idx);
1409         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1410         ufshpb_add_active_list(hpb, rgn, srgn);
1411         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1412 }
1413
1414 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1415 {
1416         struct ufshpb_region *rgn;
1417         unsigned long flags;
1418         int ret;
1419         LIST_HEAD(pending_list);
1420
1421         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1422         while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1423                                                struct ufshpb_region,
1424                                                list_inact_rgn))) {
1425                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1426                         break;
1427
1428                 list_del_init(&rgn->list_inact_rgn);
1429                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1430
1431                 ret = ufshpb_evict_region(hpb, rgn);
1432                 if (ret) {
1433                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1434                         ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1435                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1436                 }
1437
1438                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1439         }
1440
1441         list_splice(&pending_list, &hpb->lh_inact_rgn);
1442         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1443 }
1444
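/*
 * Normalization worker used in host control mode: shift every subregion
 * read counter right by the normalization factor, recompute the per-region
 * totals, and queue any active region left with zero reads for
 * inactivation.
 */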
1445 static void ufshpb_normalization_work_handler(struct work_struct *work)
1446 {
1447         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1448                                              ufshpb_normalization_work);
1449         int rgn_idx;
1450         u8 factor = hpb->params.normalization_factor;
1451
1452         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1453                 struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1454                 int srgn_idx;
1455
1456                 spin_lock(&rgn->rgn_lock);
1457                 rgn->reads = 0;
1458                 for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1459                         struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1460
1461                         srgn->reads >>= factor;
1462                         rgn->reads += srgn->reads;
1463                 }
1464                 spin_unlock(&rgn->rgn_lock);
1465
1466                 if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1467                         continue;
1468
1469                 /* if region is active but has no reads - inactivate it */
1470                 spin_lock(&hpb->rsp_list_lock);
1471                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1472                 spin_unlock(&hpb->rsp_list_lock);
1473         }
1474 }
1475
1476 static void ufshpb_map_work_handler(struct work_struct *work)
1477 {
1478         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1479
1480         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1481                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1482                            "%s: ufshpb state is not PRESENT\n", __func__);
1483                 return;
1484         }
1485
1486         ufshpb_run_inactive_region_list(hpb);
1487         ufshpb_run_active_subregion_list(hpb);
1488 }
1489
1490 /*
1491  * This function does not need to hold any locks (rgn_state_lock,
1492  * rsp_list_lock, etc.) because it is only called during initialization.
1493  */
1494 static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1495                                             struct ufshpb_lu *hpb,
1496                                             struct ufshpb_region *rgn)
1497 {
1498         struct ufshpb_subregion *srgn;
1499         int srgn_idx, i;
1500         int err = 0;
1501
1502         for_each_sub_region(rgn, srgn_idx, srgn) {
1503                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1504                 srgn->srgn_state = HPB_SRGN_INVALID;
1505                 if (!srgn->mctx) {
1506                         err = -ENOMEM;
1507                         dev_err(hba->dev,
1508                                 "alloc mctx for pinned region failed\n");
1509                         goto release;
1510                 }
1511
1512                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1513         }
1514
1515         rgn->rgn_state = HPB_RGN_PINNED;
1516         return 0;
1517
1518 release:
1519         for (i = 0; i < srgn_idx; i++) {
1520                 srgn = rgn->srgn_tbl + i;
1521                 ufshpb_put_map_ctx(hpb, srgn->mctx);
1522         }
1523         return err;
1524 }
1525
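/*
 * Initialize the sub-region descriptors of a region; for the last region of
 * the LU, mark the trailing sub-region that holds fewer than a full set of
 * entries.
 */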
1526 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1527                                       struct ufshpb_region *rgn, bool last)
1528 {
1529         int srgn_idx;
1530         struct ufshpb_subregion *srgn;
1531
1532         for_each_sub_region(rgn, srgn_idx, srgn) {
1533                 INIT_LIST_HEAD(&srgn->list_act_srgn);
1534
1535                 srgn->rgn_idx = rgn->rgn_idx;
1536                 srgn->srgn_idx = srgn_idx;
1537                 srgn->srgn_state = HPB_SRGN_UNUSED;
1538         }
1539
1540         if (unlikely(last && hpb->last_srgn_entries))
1541                 srgn->is_last = true;
1542 }
1543
1544 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1545                                       struct ufshpb_region *rgn, int srgn_cnt)
1546 {
1547         rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1548                                  GFP_KERNEL);
1549         if (!rgn->srgn_tbl)
1550                 return -ENOMEM;
1551
1552         rgn->srgn_cnt = srgn_cnt;
1553         return 0;
1554 }
1555
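/*
 * Derive the per-LU HPB geometry (region/sub-region memory sizes, entry
 * counts and masks, pinned range, LRU depth) from the device and unit
 * descriptor values.
 */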
1556 static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1557                                      struct ufshpb_lu *hpb,
1558                                      struct ufshpb_dev_info *hpb_dev_info,
1559                                      struct ufshpb_lu_info *hpb_lu_info)
1560 {
1561         u32 entries_per_rgn;
1562         u64 rgn_mem_size, tmp;
1563
1564         if (ufshpb_is_legacy(hba))
1565                 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1566         else
1567                 hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
1568
1569         hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1570         hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1571                 (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1572                 : PINNED_NOT_SET;
1573         hpb->lru_info.max_lru_active_cnt =
1574                 hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1575
1576         rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1577                         * HPB_ENTRY_SIZE;
1578         do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1579         hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1580                 * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
1581
1582         tmp = rgn_mem_size;
1583         do_div(tmp, HPB_ENTRY_SIZE);
1584         entries_per_rgn = (u32)tmp;
1585         hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1586         hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1587
1588         hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1589         hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1590         hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1591
1592         tmp = rgn_mem_size;
1593         do_div(tmp, hpb->srgn_mem_size);
1594         hpb->srgns_per_rgn = (int)tmp;
1595
1596         hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1597                                 entries_per_rgn);
1598         hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1599                                 (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1600         hpb->last_srgn_entries = hpb_lu_info->num_blocks
1601                                  % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1602
1603         hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
1604
1605         if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
1606                 hpb->is_hcm = true;
1607 }
1608
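/*
 * Allocate and initialize the region table of the LU. Pinned regions get
 * their map contexts allocated up front; all other regions start INACTIVE.
 */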
1609 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1610 {
1611         struct ufshpb_region *rgn_table, *rgn;
1612         int rgn_idx, i;
1613         int ret = 0;
1614
1615         rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1616                             GFP_KERNEL);
1617         if (!rgn_table)
1618                 return -ENOMEM;
1619
1620         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1621                 int srgn_cnt = hpb->srgns_per_rgn;
1622                 bool last_srgn = false;
1623
1624                 rgn = rgn_table + rgn_idx;
1625                 rgn->rgn_idx = rgn_idx;
1626
1627                 spin_lock_init(&rgn->rgn_lock);
1628
1629                 INIT_LIST_HEAD(&rgn->list_inact_rgn);
1630                 INIT_LIST_HEAD(&rgn->list_lru_rgn);
1631                 INIT_LIST_HEAD(&rgn->list_expired_rgn);
1632
1633                 if (rgn_idx == hpb->rgns_per_lu - 1) {
1634                         srgn_cnt = ((hpb->srgns_per_lu - 1) %
1635                                     hpb->srgns_per_rgn) + 1;
1636                         last_srgn = true;
1637                 }
1638
1639                 ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1640                 if (ret)
1641                         goto release_srgn_table;
1642                 ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1643
1644                 if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1645                         ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1646                         if (ret)
1647                                 goto release_srgn_table;
1648                 } else {
1649                         rgn->rgn_state = HPB_RGN_INACTIVE;
1650                 }
1651
1652                 rgn->rgn_flags = 0;
1653                 rgn->hpb = hpb;
1654         }
1655
1656         hpb->rgn_tbl = rgn_table;
1657
1658         return 0;
1659
1660 release_srgn_table:
1661         for (i = 0; i <= rgn_idx; i++)
1662                 kvfree(rgn_table[i].srgn_tbl);
1663
1664         kvfree(rgn_table);
1665         return ret;
1666 }
1667
1668 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1669                                          struct ufshpb_region *rgn)
1670 {
1671         int srgn_idx;
1672         struct ufshpb_subregion *srgn;
1673
1674         for_each_sub_region(rgn, srgn_idx, srgn)
1675                 if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1676                         srgn->srgn_state = HPB_SRGN_UNUSED;
1677                         ufshpb_put_map_ctx(hpb, srgn->mctx);
1678                 }
1679 }
1680
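/*
 * Tear down the region table: release the map contexts of any non-inactive
 * region and free the sub-region and region descriptor arrays.
 */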
1681 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1682 {
1683         int rgn_idx;
1684
1685         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1686                 struct ufshpb_region *rgn;
1687
1688                 rgn = hpb->rgn_tbl + rgn_idx;
1689                 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1690                         rgn->rgn_state = HPB_RGN_INACTIVE;
1691
1692                         ufshpb_destroy_subregion_tbl(hpb, rgn);
1693                 }
1694
1695                 kvfree(rgn->srgn_tbl);
1696         }
1697
1698         kvfree(hpb->rgn_tbl);
1699 }
1700
1701 /* SYSFS functions - HPB statistics */
1702 #define ufshpb_sysfs_attr_show_func(__name)                             \
1703 static ssize_t __name##_show(struct device *dev,                        \
1704         struct device_attribute *attr, char *buf)                       \
1705 {                                                                       \
1706         struct scsi_device *sdev = to_scsi_device(dev);                 \
1707         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1708                                                                         \
1709         if (!hpb)                                                       \
1710                 return -ENODEV;                                         \
1711                                                                         \
1712         return sysfs_emit(buf, "%llu\n", hpb->stats.__name);            \
1713 }                                                                       \
1714 \
1715 static DEVICE_ATTR_RO(__name)
1716
1717 ufshpb_sysfs_attr_show_func(hit_cnt);
1718 ufshpb_sysfs_attr_show_func(miss_cnt);
1719 ufshpb_sysfs_attr_show_func(rb_noti_cnt);
1720 ufshpb_sysfs_attr_show_func(rb_active_cnt);
1721 ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
1722 ufshpb_sysfs_attr_show_func(map_req_cnt);
1723 ufshpb_sysfs_attr_show_func(umap_req_cnt);
1724
1725 static struct attribute *hpb_dev_stat_attrs[] = {
1726         &dev_attr_hit_cnt.attr,
1727         &dev_attr_miss_cnt.attr,
1728         &dev_attr_rb_noti_cnt.attr,
1729         &dev_attr_rb_active_cnt.attr,
1730         &dev_attr_rb_inactive_cnt.attr,
1731         &dev_attr_map_req_cnt.attr,
1732         &dev_attr_umap_req_cnt.attr,
1733         NULL,
1734 };
1735
1736 struct attribute_group ufs_sysfs_hpb_stat_group = {
1737         .name = "hpb_stats",
1738         .attrs = hpb_dev_stat_attrs,
1739 };
1740
1741 /* SYSFS functions - HPB parameters */
1742 #define ufshpb_sysfs_param_show_func(__name)                            \
1743 static ssize_t __name##_show(struct device *dev,                        \
1744         struct device_attribute *attr, char *buf)                       \
1745 {                                                                       \
1746         struct scsi_device *sdev = to_scsi_device(dev);                 \
1747         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1748                                                                         \
1749         if (!hpb)                                                       \
1750                 return -ENODEV;                                         \
1751                                                                         \
1752         return sysfs_emit(buf, "%d\n", hpb->params.__name);             \
1753 }
1754
1755 ufshpb_sysfs_param_show_func(requeue_timeout_ms);
1756 static ssize_t
1757 requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1758                          const char *buf, size_t count)
1759 {
1760         struct scsi_device *sdev = to_scsi_device(dev);
1761         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1762         int val;
1763
1764         if (!hpb)
1765                 return -ENODEV;
1766
1767         if (kstrtouint(buf, 0, &val))
1768                 return -EINVAL;
1769
1770         if (val < 0)
1771                 return -EINVAL;
1772
1773         hpb->params.requeue_timeout_ms = val;
1774
1775         return count;
1776 }
1777 static DEVICE_ATTR_RW(requeue_timeout_ms);
1778
1779 ufshpb_sysfs_param_show_func(activation_thld);
1780 static ssize_t
1781 activation_thld_store(struct device *dev, struct device_attribute *attr,
1782                       const char *buf, size_t count)
1783 {
1784         struct scsi_device *sdev = to_scsi_device(dev);
1785         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1786         int val;
1787
1788         if (!hpb)
1789                 return -ENODEV;
1790
1791         if (!hpb->is_hcm)
1792                 return -EOPNOTSUPP;
1793
1794         if (kstrtouint(buf, 0, &val))
1795                 return -EINVAL;
1796
1797         if (val <= 0)
1798                 return -EINVAL;
1799
1800         hpb->params.activation_thld = val;
1801
1802         return count;
1803 }
1804 static DEVICE_ATTR_RW(activation_thld);
1805
1806 ufshpb_sysfs_param_show_func(normalization_factor);
1807 static ssize_t
1808 normalization_factor_store(struct device *dev, struct device_attribute *attr,
1809                            const char *buf, size_t count)
1810 {
1811         struct scsi_device *sdev = to_scsi_device(dev);
1812         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1813         int val;
1814
1815         if (!hpb)
1816                 return -ENODEV;
1817
1818         if (!hpb->is_hcm)
1819                 return -EOPNOTSUPP;
1820
1821         if (kstrtouint(buf, 0, &val))
1822                 return -EINVAL;
1823
1824         if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
1825                 return -EINVAL;
1826
1827         hpb->params.normalization_factor = val;
1828
1829         return count;
1830 }
1831 static DEVICE_ATTR_RW(normalization_factor);
1832
1833 ufshpb_sysfs_param_show_func(eviction_thld_enter);
1834 static ssize_t
1835 eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
1836                           const char *buf, size_t count)
1837 {
1838         struct scsi_device *sdev = to_scsi_device(dev);
1839         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1840         int val;
1841
1842         if (!hpb)
1843                 return -ENODEV;
1844
1845         if (!hpb->is_hcm)
1846                 return -EOPNOTSUPP;
1847
1848         if (kstrtouint(buf, 0, &val))
1849                 return -EINVAL;
1850
1851         if (val <= hpb->params.eviction_thld_exit)
1852                 return -EINVAL;
1853
1854         hpb->params.eviction_thld_enter = val;
1855
1856         return count;
1857 }
1858 static DEVICE_ATTR_RW(eviction_thld_enter);
1859
1860 ufshpb_sysfs_param_show_func(eviction_thld_exit);
1861 static ssize_t
1862 eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
1863                          const char *buf, size_t count)
1864 {
1865         struct scsi_device *sdev = to_scsi_device(dev);
1866         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1867         int val;
1868
1869         if (!hpb)
1870                 return -ENODEV;
1871
1872         if (!hpb->is_hcm)
1873                 return -EOPNOTSUPP;
1874
1875         if (kstrtouint(buf, 0, &val))
1876                 return -EINVAL;
1877
1878         if (val <= hpb->params.activation_thld)
1879                 return -EINVAL;
1880
1881         hpb->params.eviction_thld_exit = val;
1882
1883         return count;
1884 }
1885 static DEVICE_ATTR_RW(eviction_thld_exit);
1886
1887 ufshpb_sysfs_param_show_func(read_timeout_ms);
1888 static ssize_t
1889 read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1890                       const char *buf, size_t count)
1891 {
1892         struct scsi_device *sdev = to_scsi_device(dev);
1893         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1894         int val;
1895
1896         if (!hpb)
1897                 return -ENODEV;
1898
1899         if (!hpb->is_hcm)
1900                 return -EOPNOTSUPP;
1901
1902         if (kstrtouint(buf, 0, &val))
1903                 return -EINVAL;
1904
1905         /* read_timeout must be at least twice timeout_polling_interval */
1906         if (val < hpb->params.timeout_polling_interval_ms * 2)
1907                 return -EINVAL;
1908
1909         hpb->params.read_timeout_ms = val;
1910
1911         return count;
1912 }
1913 static DEVICE_ATTR_RW(read_timeout_ms);
1914
1915 ufshpb_sysfs_param_show_func(read_timeout_expiries);
1916 static ssize_t
1917 read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
1918                             const char *buf, size_t count)
1919 {
1920         struct scsi_device *sdev = to_scsi_device(dev);
1921         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1922         int val;
1923
1924         if (!hpb)
1925                 return -ENODEV;
1926
1927         if (!hpb->is_hcm)
1928                 return -EOPNOTSUPP;
1929
1930         if (kstrtouint(buf, 0, &val))
1931                 return -EINVAL;
1932
1933         if (val <= 0)
1934                 return -EINVAL;
1935
1936         hpb->params.read_timeout_expiries = val;
1937
1938         return count;
1939 }
1940 static DEVICE_ATTR_RW(read_timeout_expiries);
1941
1942 ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
1943 static ssize_t
1944 timeout_polling_interval_ms_store(struct device *dev,
1945                                   struct device_attribute *attr,
1946                                   const char *buf, size_t count)
1947 {
1948         struct scsi_device *sdev = to_scsi_device(dev);
1949         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1950         int val;
1951
1952         if (!hpb)
1953                 return -ENODEV;
1954
1955         if (!hpb->is_hcm)
1956                 return -EOPNOTSUPP;
1957
1958         if (kstrtouint(buf, 0, &val))
1959                 return -EINVAL;
1960
1961         /* timeout_polling_interval must be positive and at most half of read_timeout */
1962         if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
1963                 return -EINVAL;
1964
1965         hpb->params.timeout_polling_interval_ms = val;
1966
1967         return count;
1968 }
1969 static DEVICE_ATTR_RW(timeout_polling_interval_ms);
1970
1971 ufshpb_sysfs_param_show_func(inflight_map_req);
1972 static ssize_t inflight_map_req_store(struct device *dev,
1973                                       struct device_attribute *attr,
1974                                       const char *buf, size_t count)
1975 {
1976         struct scsi_device *sdev = to_scsi_device(dev);
1977         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1978         int val;
1979
1980         if (!hpb)
1981                 return -ENODEV;
1982
1983         if (!hpb->is_hcm)
1984                 return -EOPNOTSUPP;
1985
1986         if (kstrtouint(buf, 0, &val))
1987                 return -EINVAL;
1988
1989         if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
1990                 return -EINVAL;
1991
1992         hpb->params.inflight_map_req = val;
1993
1994         return count;
1995 }
1996 static DEVICE_ATTR_RW(inflight_map_req);
1997
1998 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
1999 {
2000         hpb->params.activation_thld = ACTIVATION_THRESHOLD;
2001         hpb->params.normalization_factor = 1;
2002         hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2003         hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2004         hpb->params.read_timeout_ms = READ_TO_MS;
2005         hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2006         hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2007         hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
2008 }
2009
2010 static struct attribute *hpb_dev_param_attrs[] = {
2011         &dev_attr_requeue_timeout_ms.attr,
2012         &dev_attr_activation_thld.attr,
2013         &dev_attr_normalization_factor.attr,
2014         &dev_attr_eviction_thld_enter.attr,
2015         &dev_attr_eviction_thld_exit.attr,
2016         &dev_attr_read_timeout_ms.attr,
2017         &dev_attr_read_timeout_expiries.attr,
2018         &dev_attr_timeout_polling_interval_ms.attr,
2019         &dev_attr_inflight_map_req.attr,
2020         NULL,
2021 };
2022
2023 struct attribute_group ufs_sysfs_hpb_param_group = {
2024         .name = "hpb_params",
2025         .attrs = hpb_dev_param_attrs,
2026 };
2027
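/*
 * Pre-allocate the pre_req pool (one bio and one zeroed write-buffer page
 * each), sized to half the LU queue depth, used to build HPB pre-requests.
 */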
2028 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2029 {
2030         struct ufshpb_req *pre_req = NULL, *t;
2031         int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2032         int i;
2033
2034         INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2035
2036         hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2037         hpb->throttle_pre_req = qd;
2038         hpb->num_inflight_pre_req = 0;
2039
2040         if (!hpb->pre_req)
2041                 goto release_mem;
2042
2043         for (i = 0; i < qd; i++) {
2044                 pre_req = hpb->pre_req + i;
2045                 INIT_LIST_HEAD(&pre_req->list_req);
2046                 pre_req->req = NULL;
2047
2048                 pre_req->bio = bio_alloc(GFP_KERNEL, 1);
2049                 if (!pre_req->bio)
2050                         goto release_mem;
2051
2052                 pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2053                 if (!pre_req->wb.m_page) {
2054                         bio_put(pre_req->bio);
2055                         goto release_mem;
2056                 }
2057
2058                 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2059         }
2060
2061         return 0;
2062 release_mem:
2063         list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2064                 list_del_init(&pre_req->list_req);
2065                 bio_put(pre_req->bio);
2066                 __free_page(pre_req->wb.m_page);
2067         }
2068
2069         kfree(hpb->pre_req);
2070         return -ENOMEM;
2071 }
2072
2073 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2074 {
2075         struct ufshpb_req *pre_req = NULL;
2076         int i;
2077
2078         for (i = 0; i < hpb->throttle_pre_req; i++) {
2079                 pre_req = hpb->pre_req + i;
2080                 bio_put(hpb->pre_req[i].bio);
2081                 if (pre_req->wb.m_page)
2082                         __free_page(hpb->pre_req[i].wb.m_page);
2083                 list_del_init(&pre_req->list_req);
2084         }
2085
2086         kfree(hpb->pre_req);
2087 }
2088
2089 static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2090 {
2091         hpb->stats.hit_cnt = 0;
2092         hpb->stats.miss_cnt = 0;
2093         hpb->stats.rb_noti_cnt = 0;
2094         hpb->stats.rb_active_cnt = 0;
2095         hpb->stats.rb_inactive_cnt = 0;
2096         hpb->stats.map_req_cnt = 0;
2097         hpb->stats.umap_req_cnt = 0;
2098 }
2099
2100 static void ufshpb_param_init(struct ufshpb_lu *hpb)
2101 {
2102         hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2103         if (hpb->is_hcm)
2104                 ufshpb_hcm_param_init(hpb);
2105 }
2106
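/*
 * Per-LU initialization: set up locks, lists and workers, create the map
 * request and map page caches, the pre_req pool and the region table, and
 * start the read-timeout poller in host control mode.
 */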
2107 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2108 {
2109         int ret;
2110
2111         spin_lock_init(&hpb->rgn_state_lock);
2112         spin_lock_init(&hpb->rsp_list_lock);
2113         spin_lock_init(&hpb->param_lock);
2114
2115         INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2116         INIT_LIST_HEAD(&hpb->lh_act_srgn);
2117         INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2118         INIT_LIST_HEAD(&hpb->list_hpb_lu);
2119
2120         INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2121         if (hpb->is_hcm) {
2122                 INIT_WORK(&hpb->ufshpb_normalization_work,
2123                           ufshpb_normalization_work_handler);
2124                 INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2125                                   ufshpb_read_to_handler);
2126         }
2127
2128         hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2129                           sizeof(struct ufshpb_req), 0, 0, NULL);
2130         if (!hpb->map_req_cache) {
2131                 dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
2132                         hpb->lun);
2133                 return -ENOMEM;
2134         }
2135
2136         hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2137                           sizeof(struct page *) * hpb->pages_per_srgn,
2138                           0, 0, NULL);
2139         if (!hpb->m_page_cache) {
2140                 dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
2141                         hpb->lun);
2142                 ret = -ENOMEM;
2143                 goto release_req_cache;
2144         }
2145
2146         ret = ufshpb_pre_req_mempool_init(hpb);
2147         if (ret) {
2148                 dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
2149                         hpb->lun);
2150                 goto release_m_page_cache;
2151         }
2152
2153         ret = ufshpb_alloc_region_tbl(hba, hpb);
2154         if (ret)
2155                 goto release_pre_req_mempool;
2156
2157         ufshpb_stat_init(hpb);
2158         ufshpb_param_init(hpb);
2159
2160         if (hpb->is_hcm) {
2161                 unsigned int poll;
2162
2163                 poll = hpb->params.timeout_polling_interval_ms;
2164                 schedule_delayed_work(&hpb->ufshpb_read_to_work,
2165                                       msecs_to_jiffies(poll));
2166         }
2167
2168         return 0;
2169
2170 release_pre_req_mempool:
2171         ufshpb_pre_req_mempool_destroy(hpb);
2172 release_m_page_cache:
2173         kmem_cache_destroy(hpb->m_page_cache);
2174 release_req_cache:
2175         kmem_cache_destroy(hpb->map_req_cache);
2176         return ret;
2177 }
2178
2179 static struct ufshpb_lu *
2180 ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2181                     struct ufshpb_dev_info *hpb_dev_info,
2182                     struct ufshpb_lu_info *hpb_lu_info)
2183 {
2184         struct ufshpb_lu *hpb;
2185         int ret;
2186
2187         hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2188         if (!hpb)
2189                 return NULL;
2190
2191         hpb->lun = sdev->lun;
2192         hpb->sdev_ufs_lu = sdev;
2193
2194         ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2195
2196         ret = ufshpb_lu_hpb_init(hba, hpb);
2197         if (ret) {
2198                 dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2199                 goto release_hpb;
2200         }
2201
2202         sdev->hostdata = hpb;
2203         return hpb;
2204
2205 release_hpb:
2206         kfree(hpb);
2207         return NULL;
2208 }
2209
2210 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2211 {
2212         struct ufshpb_region *rgn, *next_rgn;
2213         struct ufshpb_subregion *srgn, *next_srgn;
2214         unsigned long flags;
2215
2216         /*
2217          * If a device reset occurred, the remaining HPB region information
2218          * may be stale. Discard the HPB response lists left over after the
2219          * reset to avoid unnecessary work.
2220          */
2221         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2222         list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2223                                  list_inact_rgn)
2224                 list_del_init(&rgn->list_inact_rgn);
2225
2226         list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2227                                  list_act_srgn)
2228                 list_del_init(&srgn->list_act_srgn);
2229         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2230 }
2231
2232 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2233 {
2234         if (hpb->is_hcm) {
2235                 cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2236                 cancel_work_sync(&hpb->ufshpb_normalization_work);
2237         }
2238         cancel_work_sync(&hpb->map_work);
2239 }
2240
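/*
 * Poll the fHpbReset flag until the device clears it or the retry budget is
 * exhausted. Returns the last flag value, i.e. false on success.
 */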
2241 static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
2242 {
2243         int err = 0;
2244         bool flag_res = true;
2245         int try;
2246
2247         /* wait for the device to complete HPB reset query */
2248         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2249                 dev_dbg(hba->dev,
2250                         "%s start flag reset polling %d times\n",
2251                         __func__, try);
2252
2253                 /* Poll fHpbReset flag to be cleared */
2254                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2255                                 QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
2256
2257                 if (err) {
2258                         dev_err(hba->dev,
2259                                 "%s reading fHpbReset flag failed with error %d\n",
2260                                 __func__, err);
2261                         return flag_res;
2262                 }
2263
2264                 if (!flag_res)
2265                         goto out;
2266
2267                 usleep_range(1000, 1100);
2268         }
2269         if (flag_res) {
2270                 dev_err(hba->dev,
2271                         "%s fHpbReset was not cleared by the device\n",
2272                         __func__);
2273         }
2274 out:
2275         return flag_res;
2276 }
2277
2278 void ufshpb_reset(struct ufs_hba *hba)
2279 {
2280         struct ufshpb_lu *hpb;
2281         struct scsi_device *sdev;
2282
2283         shost_for_each_device(sdev, hba->host) {
2284                 hpb = ufshpb_get_hpb_data(sdev);
2285                 if (!hpb)
2286                         continue;
2287
2288                 if (ufshpb_get_state(hpb) != HPB_RESET)
2289                         continue;
2290
2291                 ufshpb_set_state(hpb, HPB_PRESENT);
2292         }
2293 }
2294
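/*
 * Host reset path: move every PRESENT HPB LU to the RESET state and cancel
 * its outstanding work before the host controller is reset.
 */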
2295 void ufshpb_reset_host(struct ufs_hba *hba)
2296 {
2297         struct ufshpb_lu *hpb;
2298         struct scsi_device *sdev;
2299
2300         shost_for_each_device(sdev, hba->host) {
2301                 hpb = ufshpb_get_hpb_data(sdev);
2302                 if (!hpb)
2303                         continue;
2304
2305                 if (ufshpb_get_state(hpb) != HPB_PRESENT)
2306                         continue;
2307                 ufshpb_set_state(hpb, HPB_RESET);
2308                 ufshpb_cancel_jobs(hpb);
2309                 ufshpb_discard_rsp_lists(hpb);
2310         }
2311 }
2312
2313 void ufshpb_suspend(struct ufs_hba *hba)
2314 {
2315         struct ufshpb_lu *hpb;
2316         struct scsi_device *sdev;
2317
2318         shost_for_each_device(sdev, hba->host) {
2319                 hpb = ufshpb_get_hpb_data(sdev);
2320                 if (!hpb)
2321                         continue;
2322
2323                 if (ufshpb_get_state(hpb) != HPB_PRESENT)
2324                         continue;
2325                 ufshpb_set_state(hpb, HPB_SUSPEND);
2326                 ufshpb_cancel_jobs(hpb);
2327         }
2328 }
2329
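/*
 * Resume path: return suspended or present LUs to the PRESENT state, kick
 * the map worker, and restart the read-timeout poller in host control mode.
 */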
2330 void ufshpb_resume(struct ufs_hba *hba)
2331 {
2332         struct ufshpb_lu *hpb;
2333         struct scsi_device *sdev;
2334
2335         shost_for_each_device(sdev, hba->host) {
2336                 hpb = ufshpb_get_hpb_data(sdev);
2337                 if (!hpb)
2338                         continue;
2339
2340                 if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
2341                     (ufshpb_get_state(hpb) != HPB_SUSPEND))
2342                         continue;
2343                 ufshpb_set_state(hpb, HPB_PRESENT);
2344                 ufshpb_kick_map_work(hpb);
2345                 if (hpb->is_hcm) {
2346                         unsigned int poll =
2347                                 hpb->params.timeout_polling_interval_ms;
2348
2349                         schedule_delayed_work(&hpb->ufshpb_read_to_work,
2350                                 msecs_to_jiffies(poll));
2351                 }
2352         }
2353 }
2354
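/*
 * Read the unit descriptor of the LU and extract its HPB parameters (block
 * count, pinned region range, maximum active regions). Returns -ENODEV if
 * the LU does not have HPB enabled.
 */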
2355 static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
2356                               struct ufshpb_lu_info *hpb_lu_info)
2357 {
2358         u16 max_active_rgns;
2359         u8 lu_enable;
2360         int size;
2361         int ret;
2362         char desc_buf[QUERY_DESC_MAX_SIZE];
2363
2364         ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
2365
2366         ufshcd_rpm_get_sync(hba);
2367         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2368                                             QUERY_DESC_IDN_UNIT, lun, 0,
2369                                             desc_buf, &size);
2370         ufshcd_rpm_put_sync(hba);
2371
2372         if (ret) {
2373                 dev_err(hba->dev,
2374                         "%s: idn: %d lun: %d  query request failed",
2375                         __func__, QUERY_DESC_IDN_UNIT, lun);
2376                 return ret;
2377         }
2378
2379         lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
2380         if (lu_enable != LU_ENABLED_HPB_FUNC)
2381                 return -ENODEV;
2382
2383         max_active_rgns = get_unaligned_be16(
2384                         desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
2385         if (!max_active_rgns) {
2386                 dev_err(hba->dev,
2387                         "lun %d wrong number of max active regions\n", lun);
2388                 return -ENODEV;
2389         }
2390
2391         hpb_lu_info->num_blocks = get_unaligned_be64(
2392                         desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
2393         hpb_lu_info->pinned_start = get_unaligned_be16(
2394                         desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
2395         hpb_lu_info->num_pinned = get_unaligned_be16(
2396                         desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
2397         hpb_lu_info->max_active_rgns = max_active_rgns;
2398
2399         return 0;
2400 }
2401
2402 void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2403 {
2404         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2405
2406         if (!hpb)
2407                 return;
2408
2409         ufshpb_set_state(hpb, HPB_FAILED);
2410
2411         sdev = hpb->sdev_ufs_lu;
2412         sdev->hostdata = NULL;
2413
2414         ufshpb_cancel_jobs(hpb);
2415
2416         ufshpb_pre_req_mempool_destroy(hpb);
2417         ufshpb_destroy_region_tbl(hpb);
2418
2419         kmem_cache_destroy(hpb->map_req_cache);
2420         kmem_cache_destroy(hpb->m_page_cache);
2421
2422         list_del_init(&hpb->list_hpb_lu);
2423
2424         kfree(hpb);
2425 }
2426
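/*
 * Called once every LU has been configured: shrink the global memory pools
 * to the size actually required and either bring all HPB LUs to the PRESENT
 * state or tear everything down if the HPB reset query failed.
 */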
2427 static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
2428 {
2429         int pool_size;
2430         struct ufshpb_lu *hpb;
2431         struct scsi_device *sdev;
2432         bool init_success;
2433
2434         if (tot_active_srgn_pages == 0) {
2435                 ufshpb_remove(hba);
2436                 return;
2437         }
2438
2439         init_success = !ufshpb_check_hpb_reset_query(hba);
2440
2441         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2442         if (pool_size > tot_active_srgn_pages) {
2443                 mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
2444                 mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
2445         }
2446
2447         shost_for_each_device(sdev, hba->host) {
2448                 hpb = ufshpb_get_hpb_data(sdev);
2449                 if (!hpb)
2450                         continue;
2451
2452                 if (init_success) {
2453                         ufshpb_set_state(hpb, HPB_PRESENT);
2454                         if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
2455                                 queue_work(ufshpb_wq, &hpb->map_work);
2456                         if (!hpb->is_hcm)
2457                                 ufshpb_issue_umap_all_req(hpb);
2458                 } else {
2459                         dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
2460                         ufshpb_destroy_lu(hba, sdev);
2461                 }
2462         }
2463
2464         if (!init_success)
2465                 ufshpb_remove(hba);
2466 }
2467
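/*
 * Per-LU entry point from slave configuration: query the unit descriptor,
 * allocate the ufshpb_lu instance and, once the last LU is done, finish
 * device-wide HPB initialization.
 */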
2468 void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2469 {
2470         struct ufshpb_lu *hpb;
2471         int ret;
2472         struct ufshpb_lu_info hpb_lu_info = { 0 };
2473         int lun = sdev->lun;
2474
2475         if (lun >= hba->dev_info.max_lu_supported)
2476                 goto out;
2477
2478         ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
2479         if (ret)
2480                 goto out;
2481
2482         hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
2483                                   &hpb_lu_info);
2484         if (!hpb)
2485                 goto out;
2486
2487         tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
2488                         hpb->srgns_per_rgn * hpb->pages_per_srgn;
2489
2490 out:
2491         /* All LUs are initialized */
2492         if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
2493                 ufshpb_hpb_lu_prepared(hba);
2494 }
2495
2496 static int ufshpb_init_mem_wq(struct ufs_hba *hba)
2497 {
2498         int ret;
2499         unsigned int pool_size;
2500
2501         ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
2502                                         sizeof(struct ufshpb_map_ctx),
2503                                         0, 0, NULL);
2504         if (!ufshpb_mctx_cache) {
2505                 dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
2506                 return -ENOMEM;
2507         }
2508
2509         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2510         dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
2511                __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
2512
2513         ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
2514                                                     ufshpb_mctx_cache);
2515         if (!ufshpb_mctx_pool) {
2516                 dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
2517                 ret = -ENOMEM;
2518                 goto release_mctx_cache;
2519         }
2520
2521         ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
2522         if (!ufshpb_page_pool) {
2523                 dev_err(hba->dev, "ufshpb: cannot init page pool\n");
2524                 ret = -ENOMEM;
2525                 goto release_mctx_pool;
2526         }
2527
2528         ufshpb_wq = alloc_workqueue("ufshpb-wq",
2529                                         WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2530         if (!ufshpb_wq) {
2531                 dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
2532                 ret = -ENOMEM;
2533                 goto release_page_pool;
2534         }
2535
2536         return 0;
2537
2538 release_page_pool:
2539         mempool_destroy(ufshpb_page_pool);
2540 release_mctx_pool:
2541         mempool_destroy(ufshpb_mctx_pool);
2542 release_mctx_cache:
2543         kmem_cache_destroy(ufshpb_mctx_cache);
2544         return ret;
2545 }
2546
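/*
 * Parse the HPB fields of the geometry descriptor; disable HPB if the device
 * reports no HPB LUs or an invalid region/sub-region geometry.
 */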
2547 void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
2548 {
2549         struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
2550         int max_active_rgns = 0;
2551         int hpb_num_lu;
2552
2553         hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
2554         if (hpb_num_lu == 0) {
2555                 dev_err(hba->dev, "No HPB LU supported\n");
2556                 hpb_info->hpb_disabled = true;
2557                 return;
2558         }
2559
2560         hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
2561         hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
2562         max_active_rgns = get_unaligned_be16(geo_buf +
2563                           GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
2564
2565         if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
2566             max_active_rgns == 0) {
2567                 dev_err(hba->dev, "No HPB supported device\n");
2568                 hpb_info->hpb_disabled = true;
2569                 return;
2570         }
2571 }
2572
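/*
 * Parse the HPB fields of the device descriptor: control mode, HPB version
 * (legacy or not), number of LUs, and the maximum single-command chunk size.
 */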
2573 void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
2574 {
2575         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2576         int version, ret;
2577         int max_single_cmd;
2578
2579         hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
2580
2581         version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
2582         if ((version != HPB_SUPPORT_VERSION) &&
2583             (version != HPB_SUPPORT_LEGACY_VERSION)) {
2584                 dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
2585                         __func__, version);
2586                 hpb_dev_info->hpb_disabled = true;
2587                 return;
2588         }
2589
2590         if (version == HPB_SUPPORT_LEGACY_VERSION)
2591                 hpb_dev_info->is_legacy = true;
2592
2593         /*
2594          * Get the number of user logical units so we can check whether all
2595          * scsi_device instances have finished initialization.
2596          */
2597         hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
2598
2599         if (hpb_dev_info->is_legacy)
2600                 return;
2601
2602         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2603                 QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
2604
2605         if (ret)
2606                 hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
2607         else
2608                 hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
2609 }
2610
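/*
 * Device-wide HPB initialization: allocate the shared caches, pools and
 * workqueue, then set the fHpbReset flag so the device rebuilds its HPB
 * state.
 */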
2611 void ufshpb_init(struct ufs_hba *hba)
2612 {
2613         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2614         int try;
2615         int ret;
2616
2617         if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
2618                 return;
2619
2620         if (ufshpb_init_mem_wq(hba)) {
2621                 hpb_dev_info->hpb_disabled = true;
2622                 return;
2623         }
2624
2625         atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
2626         tot_active_srgn_pages = 0;
2627         /* issue HPB reset query */
2628         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2629                 ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2630                                         QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
2631                 if (!ret)
2632                         break;
2633         }
2634 }
2635
2636 void ufshpb_remove(struct ufs_hba *hba)
2637 {
2638         mempool_destroy(ufshpb_page_pool);
2639         mempool_destroy(ufshpb_mctx_pool);
2640         kmem_cache_destroy(ufshpb_mctx_cache);
2641
2642         destroy_workqueue(ufshpb_wq);
2643 }
2644
2645 module_param(ufshpb_host_map_kbytes, uint, 0644);
2646 MODULE_PARM_DESC(ufshpb_host_map_kbytes,
2647         "ufshpb host mapping memory kilo-bytes for ufshpb memory-pool");