// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

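/*
 * Each sysfs file under /sys/block/<disk>/queue is backed by one of these
 * entries, pairing the attribute with optional show/store callbacks that
 * operate on the owning request_queue.
 */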
struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

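/*
 * Helpers used by most attributes: print an unsigned long, or parse one from
 * user input while rejecting values that do not fit in an unsigned int.
 */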
static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        unsigned long v;
        int err;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!queue_is_mq(q))
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        err = blk_mq_update_nr_requests(q, nr);
        if (err)
                return err;

        return ret;
}

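/*
 * read_ahead_kb lives in the backing_dev_info and is stored in pages;
 * convert between pages and kilobytes on the way in and out.
 */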
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb;

        if (!q->disk)
                return -EINVAL;
        ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
        return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret;

        if (!q->disk)
                return -EINVAL;
        ret = queue_var_store(&ra_kb, page, count);
        if (ret < 0)
                return ret;
        q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
        return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

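/*
 * The user-requested discard limit must be a multiple of the discard
 * granularity and fit in an unsigned int worth of sectors; it is applied as
 * max_user_discard_sectors through a queue_limits update with the queue
 * frozen.
 */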
static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard_bytes;
        struct queue_limits lim;
        ssize_t ret;
        int err;

        ret = queue_var_store(&max_discard_bytes, page, count);
        if (ret < 0)
                return ret;

        if (max_discard_bytes & (q->limits.discard_granularity - 1))
                return -EINVAL;

        if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
                return -EINVAL;

        blk_mq_freeze_queue(q);
        lim = queue_limits_start_update(q);
        lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
        err = queue_limits_commit_update(q, &lim);
        blk_mq_unfreeze_queue(q);

        if (err)
                return err;
        return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
                                                 char *page)
{
        return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
        unsigned long long max_sectors = q->limits.max_zone_append_sectors;

        return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

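/*
 * max_sectors_kb is a user-imposed cap: it is committed as max_user_sectors
 * (in 512-byte sectors) while the queue is frozen.
 */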
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb;
        struct queue_limits lim;
        ssize_t ret;
        int err;

        ret = queue_var_store(&max_sectors_kb, page, count);
        if (ret < 0)
                return ret;

        blk_mq_freeze_queue(q);
        lim = queue_limits_start_update(q);
        lim.max_user_sectors = max_sectors_kb << 1;
        err = queue_limits_commit_update(q, &lim);
        blk_mq_unfreeze_queue(q);
        if (err)
                return err;
        return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_dma_alignment(q), page);
}

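/*
 * Generate show/store methods for simple boolean queue flags.  @neg inverts
 * the exposed value, e.g. "rotational" is the negation of QUEUE_FLAG_NONROT.
 */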
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_##name##_show(struct request_queue *q, char *page)               \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);            \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        if (val)                                                        \
                blk_queue_flag_set(QUEUE_FLAG_##flag, q);               \
        else                                                            \
                blk_queue_flag_clear(QUEUE_FLAG_##flag, q);             \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

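/*
 * Zoned devices are reported as "host-managed"; everything else, including
 * devices that merely emulate zone support, reads back as "none".
 */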
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        if (blk_queue_is_zoned(q))
                return sprintf(page, "host-managed\n");
        return sprintf(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

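/*
 * nomerges encodes two flags: 0 = merging enabled, 1 = only trivial
 * one-hit-cache merges (NOXMERGES), 2 = no merging at all (NOMERGES).
 */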
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

        return ret;
}

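/*
 * rq_affinity: 0 = no completion affinity, 1 = complete in the submitter's
 * CPU group (SAME_COMP), 2 = force completion on the exact submitting CPU
 * (SAME_COMP + SAME_FORCE).  Writes are a no-op on !CONFIG_SMP builds.
 */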
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        if (val == 2) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
#endif
        return ret;
}

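/*
 * io_poll_delay is kept only for backwards compatibility: reads always
 * return -1 and writes are accepted but ignored.  io_poll writes are also
 * ignored; polling is controlled through driver specific parameters.
 */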
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%d\n", -1);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                      size_t count)
{
        return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                return -EINVAL;
        pr_info_ratelimited("writes to the poll attribute are ignored.\n");
        pr_info_ratelimited("please use driver specific parameters instead.\n");
        return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
                                      size_t count)
{
        unsigned int val;
        int err;

        err = kstrtou32(page, 10, &val);
        if (err || val == 0)
                return -EINVAL;

        blk_queue_rq_timeout(q, msecs_to_jiffies(val));

        return count;
}

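/*
 * write_cache reports and controls the QUEUE_FLAG_WC hint; switching to
 * "write back" is only allowed when the hardware actually has a volatile
 * write cache (QUEUE_FLAG_HW_WC).
 */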
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return sprintf(page, "write back\n");

        return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
{
        if (!strncmp(page, "write back", 10)) {
                if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
                        return -EINVAL;
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        } else if (!strncmp(page, "write through", 13) ||
                   !strncmp(page, "none", 4)) {
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);
        } else {
                return -EINVAL;
        }

        return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_dax(q), page);
}

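/*
 * Helpers to declare the sysfs entries: read-only attributes get mode 0444
 * and only a ->show method, read-write ones get 0644 plus a ->store method.
 */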
#define QUEUE_RO_ENTRY(_prefix, _name)                  \
static struct queue_sysfs_entry _prefix##_entry = {     \
        .attr = { .name = _name, .mode = 0444 },        \
        .show = _prefix##_show,                         \
};

#define QUEUE_RW_ENTRY(_prefix, _name)                  \
static struct queue_sysfs_entry _prefix##_entry = {     \
        .attr = { .name = _name, .mode = 0644 },        \
        .show = _prefix##_show,                         \
        .store = _prefix##_store,                       \
};

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = 0444 },
        .show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

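/*
 * wbt_lat_usec exposes the writeback throttling target latency in
 * microseconds; writing -1 restores the default target, and a latency of 0
 * effectively disables throttling.
 */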
#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
        int err;
        s64 v;

        err = kstrtos64(page, 10, &v);
        if (err < 0)
                return err;

        *var = v;
        return 0;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
        if (!wbt_rq_qos(q))
                return -EINVAL;

        if (wbt_disabled(q))
                return sprintf(page, "0\n");

        return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        struct rq_qos *rqos;
        ssize_t ret;
        s64 val;

        ret = queue_var_store64(&val, page);
        if (ret < 0)
                return ret;
        if (val < -1)
                return -EINVAL;

        rqos = wbt_rq_qos(q);
        if (!rqos) {
                ret = wbt_init(q->disk);
                if (ret)
                        return ret;
        }

        if (val == -1)
                val = wbt_default_latency_nsec(q);
        else if (val >= 0)
                val *= 1000ULL;

        if (wbt_get_min_lat(q) == val)
                return count;

        /*
         * Ensure that the queue is idled, in case the latency update
         * ends up either enabling or disabling wbt completely. We can't
         * have IO inflight if that happens.
         */
        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        wbt_set_min_lat(q, val);

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        return count;
}

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_discard_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_zone_append_max_entry.attr,
        &queue_zone_write_granularity_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nr_zones_entry.attr,
        &queue_max_open_zones_entry.attr,
        &queue_max_active_zones_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_iostats_entry.attr,
        &queue_stable_writes_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_fua_entry.attr,
        &queue_dax_entry.attr,
        &queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        &blk_throtl_sample_time_entry.attr,
#endif
        &queue_virt_boundary_mask_entry.attr,
        &queue_dma_alignment_entry.attr,
        NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
        &queue_requests_entry.attr,
        &elv_iosched_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
        &queue_wb_lat_entry.attr,
#endif
        NULL,
};

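/*
 * Hide the zone management limits on devices that are not zoned; all other
 * common attributes stay visible.  The blk-mq group is hidden entirely on
 * bio-based queues, and io_timeout only appears when the driver implements
 * a ->timeout handler.
 */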
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
                                  int n)
{
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;

        if ((attr == &queue_max_open_zones_entry.attr ||
             attr == &queue_max_active_zones_entry.attr) &&
            !blk_queue_is_zoned(q))
                return 0;

        return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
                                         struct attribute *attr, int n)
{
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;

        if (!queue_is_mq(q))
                return 0;

        if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
                return 0;

        return attr->mode;
}

static struct attribute_group queue_attr_group = {
        .attrs = queue_attrs,
        .is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
        .attrs = blk_mq_queue_attrs,
        .is_visible = blk_mq_queue_attr_visible,
};

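/*
 * sysfs ->show/->store entry points: translate the kobject back to the
 * gendisk's request_queue and dispatch to the attribute's handler under
 * sysfs_lock.
 */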
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        mutex_lock(&q->sysfs_lock);
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
        &queue_attr_group,
        &blk_mq_queue_attr_group,
        NULL
};

static void blk_queue_release(struct kobject *kobj)
{
        /* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
        .default_groups = blk_queue_attr_groups,
        .sysfs_ops      = &queue_sysfs_ops,
        .release        = blk_queue_release,
};

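/*
 * Tear down the queue's debugfs directory, including any blktrace state,
 * while holding debugfs_mutex.
 */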
static void blk_debugfs_remove(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        mutex_lock(&q->debugfs_mutex);
        blk_trace_shutdown(q);
        debugfs_remove_recursive(q->debugfs_dir);
        q->debugfs_dir = NULL;
        q->sched_debugfs_dir = NULL;
        q->rqos_debugfs_dir = NULL;
        mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        int ret;

        mutex_lock(&q->sysfs_dir_lock);
        kobject_init(&disk->queue_kobj, &blk_queue_ktype);
        ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
        if (ret < 0)
                goto out_put_queue_kobj;

        if (queue_is_mq(q)) {
                ret = blk_mq_sysfs_register(disk);
                if (ret)
                        goto out_put_queue_kobj;
        }
        mutex_lock(&q->sysfs_lock);

        mutex_lock(&q->debugfs_mutex);
        q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
        if (queue_is_mq(q))
                blk_mq_debugfs_register(q);
        mutex_unlock(&q->debugfs_mutex);

        ret = disk_register_independent_access_ranges(disk);
        if (ret)
                goto out_debugfs_remove;

        if (q->elevator) {
                ret = elv_register_queue(q, false);
                if (ret)
                        goto out_unregister_ia_ranges;
        }

        ret = blk_crypto_sysfs_register(disk);
        if (ret)
                goto out_elv_unregister;

        blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
        wbt_enable_default(disk);
        blk_throtl_register(disk);

        /* Now everything is ready and send out KOBJ_ADD uevent */
        kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
        if (q->elevator)
                kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
        mutex_unlock(&q->sysfs_lock);
        mutex_unlock(&q->sysfs_dir_lock);

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices. Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved. To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
        }

        return ret;

out_elv_unregister:
        elv_unregister_queue(q);
out_unregister_ia_ranges:
        disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
        blk_debugfs_remove(disk);
        mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
        kobject_put(&disk->queue_kobj);
        mutex_unlock(&q->sysfs_dir_lock);
        return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        /* Return early if disk->queue was never registered. */
        if (!blk_queue_registered(q))
                return;

        /*
         * Since sysfs_remove_dir() prevents adding new directory entries
         * before removal of existing entries starts, protect against
         * concurrent elv_iosched_store() calls.
         */
        mutex_lock(&q->sysfs_lock);
        blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
        mutex_unlock(&q->sysfs_lock);

        mutex_lock(&q->sysfs_dir_lock);
        /*
         * Remove the sysfs attributes before unregistering the queue data
         * structures that can be modified through sysfs.
         */
        if (queue_is_mq(q))
                blk_mq_sysfs_unregister(disk);
        blk_crypto_sysfs_unregister(disk);

        mutex_lock(&q->sysfs_lock);
        elv_unregister_queue(q);
        disk_unregister_independent_access_ranges(disk);
        mutex_unlock(&q->sysfs_lock);

        /* Now that we've deleted all child objects, we can delete the queue. */
        kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
        kobject_del(&disk->queue_kobj);
        mutex_unlock(&q->sysfs_dir_lock);

        blk_debugfs_remove(disk);
}