// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

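/*
 * All hardware-context resources are freed here, when the hctx kobject's
 * refcount drops to zero, rather than at queue teardown: sysfs readers may
 * still hold a reference to the kobject at that point.
 */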
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

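/*
 * The show/store wrappers below resolve the attribute entry and its context
 * from the embedded kobject, then invoke the entry's handler under
 * q->sysfs_lock, skipping queues that are already dying.
 */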
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

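/*
 * Print the CPUs mapped to this hardware queue as a comma-separated list,
 * stopping early if the output would overflow a single page.
 */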
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show = blk_mq_sysfs_show,
	.store = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show = blk_mq_hw_sysfs_show,
	.store = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.release = blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops = &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release = blk_mq_hw_sysfs_release,
};

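/*
 * Remove the sysfs entries for a hardware context: the per-CPU ctx kobjects
 * first, then the hctx kobject itself.
 */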
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

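/*
 * Register a hardware context and its software contexts under the queue's
 * mq directory. On failure, any ctx kobjects added so far are deleted again
 * so the caller sees all-or-nothing behaviour.
 */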
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, j, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			goto out;
	}

	return 0;
out:
	hctx_for_each_ctx(hctx, ctx, j) {
		if (j < i)
			kobject_del(&ctx->kobj);
	}
	kobject_del(&hctx->kobj);
	return ret;
}

void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_dir_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

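/*
 * Initialize the queue's mq kobject and one kobject per software context.
 * Each ctx takes a reference on the mq kobject, dropped again in
 * blk_mq_ctx_sysfs_release() when the ctx kobject goes away.
 */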
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

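/*
 * Add the "mq" directory under the device kobject and register every
 * hardware context beneath it. Called with sysfs_dir_lock held; on failure
 * all previously registered hctxs are unregistered again.
 */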
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_dir_lock);

	ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

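/*
 * blk_mq_sysfs_unregister()/blk_mq_sysfs_register() tear down and rebuild
 * the per-hctx directories when the set of hardware queues changes; both
 * are no-ops until the initial registration has completed.
 */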
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	return ret;
}