// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */
#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif
/* Get a random number in [l, r) */
#define damon_rand(l, r) (l + prandom_u32_max(r - l))

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	INIT_LIST_HEAD(&region->list);

	return region;
}
/*
 * Add a region between two other regions
 */
inline void damon_insert_region(struct damon_region *r,
		struct damon_region *prev, struct damon_region *next,
		struct damon_target *t)
{
	__list_add(&r->list, &prev->list, &next->list);
	t->nr_regions++;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}
static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kfree(r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}
/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(unsigned long id)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->id = id;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);

	return t;
}
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}
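/*
 * Illustrative sketch, not part of the original file: composing the
 * constructors above.  The target id and the address range are hypothetical;
 * their interpretation depends on the monitoring primitive in use.
 */
static void __maybe_unused damon_target_usage_example(void)
{
	struct damon_target *t = damon_new_target(42);	/* hypothetical id */
	struct damon_region *r;

	if (!t)
		return;
	/* Cover a hypothetical 1 MiB range with a single region */
	r = damon_new_region(0x100000, 0x200000);
	if (r)
		damon_add_region(r, t);
	pr_debug("example target has %u region(s)\n", damon_nr_regions(t));
	/* damon_free_target() also frees all regions of the target */
	damon_free_target(t);
}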
struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/* Defaults: 5 ms sampling, 100 ms aggregation, 60 s primitive update */
	ctx->sample_interval = 5 * 1000;
	ctx->aggr_interval = 100 * 1000;
	ctx->primitive_update_interval = 60 * 1000 * 1000;

	ktime_get_coarse_ts64(&ctx->last_aggregation);
	ctx->last_primitive_update = ctx->last_aggregation;

	mutex_init(&ctx->kdamond_lock);

	ctx->min_nr_regions = 10;
	ctx->max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);

	return ctx;
}
static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->primitive.cleanup) {
		ctx->primitive.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}
void damon_destroy_ctx(struct damon_ctx *ctx)
{
	damon_destroy_targets(ctx);
	kfree(ctx);
}
/**
 * damon_set_targets() - Set monitoring targets.
 * @ctx:	monitoring context
 * @ids:	array of target ids
 * @nr_ids:	number of entries in @ids
 *
 * This function should not be called while the kdamond is running.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_targets(struct damon_ctx *ctx,
		      unsigned long *ids, ssize_t nr_ids)
{
	ssize_t i;
	struct damon_target *t, *next;

	damon_destroy_targets(ctx);

	for (i = 0; i < nr_ids; i++) {
		t = damon_new_target(ids[i]);
		if (!t) {
			pr_err("Failed to alloc damon_target\n");
			/* The caller should do cleanup of the ids itself */
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
			return -ENOMEM;
		}
		damon_add_target(ctx, t);
	}

	return 0;
}
/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @sample_int:		time interval between samplings
 * @aggr_int:		time interval between aggregations
 * @primitive_upd_int:	time interval between monitoring primitive updates
 * @min_nr_reg:		minimal number of regions
 * @max_nr_reg:		maximum number of regions
 *
 * This function should not be called while the kdamond is running.
 * Every time interval is in microseconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
		    unsigned long aggr_int, unsigned long primitive_upd_int,
		    unsigned long min_nr_reg, unsigned long max_nr_reg)
{
	if (min_nr_reg < 3) {
		pr_err("min_nr_regions (%lu) must be at least 3\n",
				min_nr_reg);
		return -EINVAL;
	}
	if (min_nr_reg > max_nr_reg) {
		pr_err("invalid nr_regions.  min (%lu) > max (%lu)\n",
				min_nr_reg, max_nr_reg);
		return -EINVAL;
	}

	ctx->sample_interval = sample_int;
	ctx->aggr_interval = aggr_int;
	ctx->primitive_update_interval = primitive_upd_int;
	ctx->min_nr_regions = min_nr_reg;
	ctx->max_nr_regions = max_nr_reg;

	return 0;
}
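/*
 * Illustrative sketch, not part of the original file: configuring a fresh
 * context with the same values as the documented defaults.  The target id is
 * hypothetical; its meaning depends on the monitoring primitive in use.
 */
static struct damon_ctx *__maybe_unused damon_configure_example(void)
{
	unsigned long target_id = 42;	/* hypothetical id */
	struct damon_ctx *ctx = damon_new_ctx();

	if (!ctx)
		return NULL;
	/* 5 ms sampling, 100 ms aggregation, 60 s primitive update */
	if (damon_set_attrs(ctx, 5000, 100000, 60 * 1000 * 1000, 10, 1000) ||
	    damon_set_targets(ctx, &target_id, 1)) {
		damon_destroy_ctx(ctx);
		return NULL;
	}
	return ctx;
}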
/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}
/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += r->ar.end - r->ar.start;
	}

	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}
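/*
 * Worked example for the limit above (hypothetical numbers): with 100 MiB of
 * total monitored address space and min_nr_regions == 10, sz_limit becomes
 * 10 MiB.  Merging (see kdamond_merge_regions()) refuses to grow a region
 * beyond this limit, so roughly at least min_nr_regions regions survive and
 * the monitoring never collapses into a single huge region.
 */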
static bool damon_kdamond_running(struct damon_ctx *ctx)
{
	bool running;

	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);

	return running;
}
static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		ctx->kdamond_stop = false;
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}
/**
 * damon_start() - Start monitoring for a given group of contexts.
 * @ctxs:	an array of pointers to the contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts.  One thread per context is created and run in
 * parallel.  The caller should handle synchronization between the threads by
 * itself.  If a group of threads created by another 'damon_start()' call is
 * currently running, this function does nothing but returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if (nr_running_ctxs) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	mutex_unlock(&damon_lock);

	return err;
}
static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * 1000)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}
/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ctx->kdamond_stop = true;
		mutex_unlock(&ctx->kdamond_lock);
		/* Wait for the kdamond to notice the flag and exit */
		while (damon_kdamond_running(ctx))
			kdamond_usleep(ctx->sample_interval);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}
/**
 * damon_stop() - Stop monitoring for a given group of contexts.
 * @ctxs:	an array of pointers to the contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			return err;
	}

	return err;
}
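/*
 * Illustrative sketch, not part of the original file: starting and later
 * stopping a single, already configured context (e.g. one returned by the
 * configuration example above).
 */
static int __maybe_unused damon_start_stop_example(struct damon_ctx *ctx)
{
	struct damon_ctx *ctxs[] = { ctx };
	int err;

	/* Fails with -EBUSY if another group of kdamonds is already running */
	err = damon_start(ctxs, 1);
	if (err)
		return err;

	/* ... let the kdamond sample for a while ... */

	/* Sets kdamond_stop and waits until the kdamond thread exits */
	return damon_stop(ctxs, 1);
}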
/*
 * damon_check_reset_time_interval() - Check if a time interval is elapsed.
 * @baseline:	the time to check whether the interval has elapsed since
 * @interval:	the time interval (microseconds)
 *
 * See whether the given time interval has passed since the given baseline
 * time.  If so, it also updates the baseline to the current time for the
 * next check.
 *
 * Return: true if the time interval has passed, or false otherwise.
 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
{
	struct timespec64 now;

	ktime_get_coarse_ts64(&now);
	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
			interval * 1000)
		return false;
	*baseline = now;
	return true;
}
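/*
 * Worked example for the conversion above (hypothetical numbers): an
 * interval of 100000 (100 ms, in microseconds) is compared against the
 * elapsed time in nanoseconds, hence the '* 1000' scaling.  Once at least
 * 100 ms have elapsed, the baseline is reset so the next period is measured
 * from now.
 */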
/*
 * Check whether it is time to flush the aggregated information
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->aggr_interval);
}
/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(t, r, damon_nr_regions(t));
			r->nr_accesses = 0;
		}
	}
}

#define sz_damon_region(r) (r->ar.end - r->ar.start)
/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);

	/* Size-weighted average of the two access frequencies */
	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}
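/*
 * Worked example for the weighted average above (hypothetical numbers): a
 * 3 MiB region with nr_accesses == 10 merged with an adjacent 1 MiB region
 * with nr_accesses == 2 yields (10 * 3M + 2 * 1M) / 4M == 8, so the larger
 * region dominates the merged frequency.
 */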
#define diff_of(a, b) (a > b ? a - b : b - a)
/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (prev && prev->ar.end == r->ar.start &&
		    diff_of(prev->nr_accesses, r->nr_accesses) <= thres &&
		    sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}
/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and
 * whose access frequencies are similar.  This is for minimizing the
 * monitoring overhead under the dynamically changeable access pattern.  If a
 * merge was unnecessarily made, later 'kdamond_split_regions()' will revert
 * it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}
/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	damon_insert_region(new, r, damon_next_region(r), t);
}
/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_ctx *ctx,
				   struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = r->ar.end - r->ar.start;

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10 percent and at most 90% of original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(ctx, t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}
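/*
 * Worked example for the split size above (hypothetical numbers): for a
 * 10 MiB region, damon_rand(1, 10) picks some k in [1, 10), so sz_sub
 * becomes k MiB aligned down to DAMON_MIN_REGION; the left sub-region thus
 * gets 10 to 90 percent of the original region and the right sub-region
 * gets the rest.
 */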
/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions
 * if the current total number of the regions is equal to or smaller than
 * half of the user-specified maximum number of regions.  This is for
 * maximizing the monitoring accuracy under the dynamically changeable access
 * patterns.  If a split was unnecessarily made, later
 * 'kdamond_merge_regions()' will revert it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(ctx, t, nr_subregions);

	last_nr_regions = nr_regions;
}
/*
 * Check whether it is time to check and apply the target monitoring regions
 *
 * Returns true if it is.
 */
static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_primitive_update,
			ctx->primitive_update_interval);
}
/*
 * Check whether the current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the current monitoring should be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;
	bool stop;

	mutex_lock(&ctx->kdamond_lock);
	stop = ctx->kdamond_stop;
	mutex_unlock(&ctx->kdamond_lock);
	if (stop)
		return true;

	if (!ctx->primitive.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->primitive.target_valid(t))
			return false;
	}

	return true;
}
static void set_kdamond_stop(struct damon_ctx *ctx)
{
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond_stop = true;
	mutex_unlock(&ctx->kdamond_lock);
}
/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = (struct damon_ctx *)data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	mutex_lock(&ctx->kdamond_lock);
	pr_info("kdamond (%d) starts\n", ctx->kdamond->pid);
	mutex_unlock(&ctx->kdamond_lock);

	if (ctx->primitive.init)
		ctx->primitive.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		set_kdamond_stop(ctx);

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		if (ctx->primitive.prepare_access_checks)
			ctx->primitive.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			set_kdamond_stop(ctx);

		kdamond_usleep(ctx->sample_interval);

		if (ctx->primitive.check_accesses)
			max_nr_accesses = ctx->primitive.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				set_kdamond_stop(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->primitive.reset_aggregated)
				ctx->primitive.reset_aggregated(ctx);
		}

		if (kdamond_need_update_primitive(ctx)) {
			if (ctx->primitive.update)
				ctx->primitive.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate &&
			ctx->callback.before_terminate(ctx))
		set_kdamond_stop(ctx);
	if (ctx->primitive.cleanup)
		ctx->primitive.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", ctx->kdamond->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	mutex_unlock(&damon_lock);

	return 0;
}
#include "core-test.h"