2 * Resource Director Technology (RDT)
5 * Copyright (C) 2017 Intel Corporation
8 * Vikas Shivappa <vikas.shivappa@intel.com>
10 * This replaces the perf-based cqm.c, but we reuse a lot of
11 * code and data structures originally from Peter Zijlstra and Matt Fleming.
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms and conditions of the GNU General Public License,
15 * version 2, as published by the Free Software Foundation.
17 * This program is distributed in the hope it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22 * More information about RDT can be found in the Intel(R) x86 Architecture
23 * Software Developer Manual June 2016, volume 3, section 17.17.
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 #include <asm/cpu_device_id.h>
29 #include "intel_rdt.h"
31 #define MSR_IA32_QM_CTR 0x0c8e
32 #define MSR_IA32_QM_EVTSEL 0x0c8d
37 struct list_head list;
41 * @rmid_free_lru A least recently used list of free RMIDs
42 * These RMIDs are guaranteed to have an occupancy less than the threshold occupancy (intel_cqm_threshold).
45 static LIST_HEAD(rmid_free_lru);
48 * @rmid_limbo_count count of currently unused but (potentially) dirty RMIDs.
50 * This counts RMIDs that no one is currently using but that
51 * may have an occupancy value > intel_cqm_threshold. The user can change
52 * the threshold occupancy value.
54 static unsigned int rmid_limbo_count;
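/*
 * RMID lifecycle, roughly: alloc_rmid() hands out the least recently used
 * free entry; free_rmid() either returns it directly or, with LLC occupancy
 * monitoring enabled, parks it on the per-domain rmid_busy_llc bitmaps; the
 * limbo worker moves it back to rmid_free_lru once its occupancy drops
 * below intel_cqm_threshold.
 */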
57 * @rmid_entry - The entry in the limbo and free lists.
59 static struct rmid_entry *rmid_ptrs;
62 * Global boolean for rdt_monitor which is true if any
63 * resource monitoring is enabled.
68 * Global to indicate which monitoring events are enabled.
70 unsigned int rdt_mon_features;
73 * This is the threshold cache occupancy at which we will consider an
74 * RMID available for re-allocation.
76 unsigned int intel_cqm_threshold;
78 static inline struct rmid_entry *__rmid_entry(u32 rmid)
80 struct rmid_entry *entry;
82 entry = &rmid_ptrs[rmid];
83 WARN_ON(entry->rmid != rmid);
88 static u64 __rmid_read(u32 rmid, u32 eventid)
93 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
94 * with a valid event code for supported resource type and the bits
95 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
96 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
97 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62) flag error and no-data conditions that callers must check.
100 wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
101 rdmsrl(MSR_IA32_QM_CTR, val);
106 static bool rmid_dirty(struct rmid_entry *entry)
108 u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
110 return val >= intel_cqm_threshold;
114 * Check the RMIDs that are marked as busy for this domain. If the
115 * reported LLC occupancy is below the threshold, clear the busy bit and
116 * decrement the count. If the busy count gets to zero on an RMID, we free the RMID.
119 void __check_limbo(struct rdt_domain *d, bool force_free)
121 struct rmid_entry *entry;
122 struct rdt_resource *r;
123 u32 crmid = 1, nrmid;
125 r = &rdt_resources_all[RDT_RESOURCE_L3];
128 * Skip RMID 0 and start from RMID 1; check all the RMIDs that
129 * are marked as busy. If the occupancy of a busy RMID is less than
130 * the threshold, decrement its busy counter and move it to the free
131 * list when the counter reaches 0.
134 nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
135 if (nrmid >= r->num_rmid)
138 entry = __rmid_entry(nrmid);
139 if (force_free || !rmid_dirty(entry)) {
140 clear_bit(entry->rmid, d->rmid_busy_llc);
141 if (!--entry->busy) {
143 list_add_tail(&entry->list, &rmid_free_lru);
150 bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
152 return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
156 * As of now, RMID allocation is global.
157 * However, we keep track of which packages the RMIDs
158 * are used on, to optimize the limbo list management.
162 struct rmid_entry *entry;
164 lockdep_assert_held(&rdtgroup_mutex);
166 if (list_empty(&rmid_free_lru))
167 return rmid_limbo_count ? -EBUSY : -ENOSPC;
169 entry = list_first_entry(&rmid_free_lru,
170 struct rmid_entry, list);
171 list_del(&entry->list);
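/*
 * add_rmid_to_limbo() parks a freed RMID that may still be dirty: the RMID
 * is marked busy in rmid_busy_llc on every domain where its occupancy may
 * still exceed intel_cqm_threshold, and the limbo worker is started for a
 * domain when this is its first busy RMID. An RMID that is not busy on any
 * domain is returned to the free list right away.
 */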
176 static void add_rmid_to_limbo(struct rmid_entry *entry)
178 struct rdt_resource *r;
179 struct rdt_domain *d;
183 r = &rdt_resources_all[RDT_RESOURCE_L3];
187 list_for_each_entry(d, &r->domains, list) {
188 if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
189 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
190 if (val <= intel_cqm_threshold)
195 * For the first limbo RMID in the domain,
196 * set up the limbo worker.
198 if (!has_busy_rmid(r, d))
199 cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
200 set_bit(entry->rmid, d->rmid_busy_llc);
208 list_add_tail(&entry->list, &rmid_free_lru);
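/*
 * Free an RMID. When LLC occupancy monitoring is enabled the RMID may
 * still tag cache lines, so it is routed through the limbo mechanism;
 * otherwise it goes straight back onto the free list.
 */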
211 void free_rmid(u32 rmid)
213 struct rmid_entry *entry;
218 lockdep_assert_held(&rdtgroup_mutex);
220 entry = __rmid_entry(rmid);
222 if (is_llc_occupancy_enabled())
223 add_rmid_to_limbo(entry);
225 list_add_tail(&entry->list, &rmid_free_lru);
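/*
 * The MBM hardware counters are only MBM_CNTR_WIDTH bits wide. Shifting
 * both MSR values up by (64 - MBM_CNTR_WIDTH) before subtracting makes the
 * unsigned subtraction wrap exactly like the hardware counter does, so a
 * single counter wraparound between two reads is still accounted for
 * correctly; shifting back down yields the chunks counted since @prev_msr.
 */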
228 static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr)
230 u64 shift = 64 - MBM_CNTR_WIDTH, chunks;
232 chunks = (cur_msr << shift) - (prev_msr << shift);
233 return chunks >> shift;
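/*
 * __mon_event_count() reads the counter selected by rr->evtid for @rmid.
 * Occupancy reads report the value directly; for the MBM events the
 * per-domain mbm_state accumulates the chunks seen since the previous
 * read (via mbm_overflow_count()) and the running total is added to
 * rr->val.
 */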
236 static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
241 tval = __rmid_read(rmid, rr->evtid);
242 if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
246 case QOS_L3_OCCUP_EVENT_ID:
249 case QOS_L3_MBM_TOTAL_EVENT_ID:
250 m = &rr->d->mbm_total[rmid];
252 case QOS_L3_MBM_LOCAL_EVENT_ID:
253 m = &rr->d->mbm_local[rmid];
257 * Code never reaches here, because an invalid
258 * event id would already have failed the __rmid_read() above.
260 return RMID_VAL_ERROR;
264 memset(m, 0, sizeof(struct mbm_state));
265 m->prev_bw_msr = m->prev_msr = tval;
269 chunks = mbm_overflow_count(m->prev_msr, tval);
273 rr->val += m->chunks;
278 * Supporting function to calculate the memory bandwidth
279 * and delta bandwidth in MBps.
281 static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
283 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
284 struct mbm_state *m = &rr->d->mbm_local[rmid];
285 u64 tval, cur_bw, chunks;
287 tval = __rmid_read(rmid, rr->evtid);
288 if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
291 chunks = mbm_overflow_count(m->prev_bw_msr, tval);
292 cur_bw = (chunks * r->mon_scale) >> 20;
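/*
 * chunks * mon_scale is the number of bytes counted since the last read;
 * ">> 20" converts bytes to MB and, because this is sampled at the 1s MBM
 * overflow interval, the result is the bandwidth in MBps.
 */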
295 m->delta_bw = abs(cur_bw - m->prev_bw);
296 m->delta_comp = false;
298 m->prev_bw_msr = tval;
302 * This is called via IPI to read the CQM/MBM counters on a domain.
305 void mon_event_count(void *info)
307 struct rdtgroup *rdtgrp, *entry;
308 struct rmid_read *rr = info;
309 struct list_head *head;
314 ret_val = __mon_event_count(rdtgrp->mon.rmid, rr);
317 * For Ctrl groups read data from child monitor groups and
318 * add them together. Count events which are read successfully.
319 * Discard the rmid_reads that report errors.
321 head = &rdtgrp->mon.crdtgrp_list;
323 if (rdtgrp->type == RDTCTRL_GROUP) {
324 list_for_each_entry(entry, head, mon.crdtgrp_list) {
325 if (__mon_event_count(entry->mon.rmid, rr) == 0)
330 /* Report an error if none of the rmid_reads is successful */
336 * Feedback loop for MBA software controller (mba_sc)
338 * mba_sc is a feedback loop where we periodically read MBM counters and
339 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so that:
342 * current bandwidth (cur_bw) < user-specified bandwidth (user_bw)
344 * This uses the MBM counters to measure the bandwidth and MBA throttle
345 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
346 * fact that resctrl rdtgroups have both monitoring and control.
348 * The frequency of the checks is 1s and we just tag along the MBM overflow
349 * timer. Having 1s interval makes the calculation of bandwidth simpler.
351 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
352 * be a need to increase the bandwidth to avoid unnecessarily restricting
353 * the L2 <-> L3 traffic.
355 * Since MBA controls the L2 external bandwidth, whereas MBM measures the
356 * L3 external bandwidth, the following sequence could lead to such a situation.
359 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
360 * phases -> mba_sc kicks in and reduces the bandwidth percentage values -> but
361 * after some time rdtgroup has mostly L2 <-> L3 traffic.
363 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
364 * throttle MSRs already have low percentage values. To avoid
365 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
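 *
 * The update rule applied below is, in short (bw_gran is the hardware
 * bandwidth granularity, delta_bw the bandwidth change observed after the
 * previous adjustment):
 *
 *	cur_bw > user_bw            ->  new_msr_val = cur_msr_val - bw_gran
 *	user_bw > cur_bw + delta_bw ->  new_msr_val = cur_msr_val + bw_gran
 *	otherwise                   ->  keep the current throttle value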
367 static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
369 u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
370 struct mbm_state *pmbm_data, *cmbm_data;
371 u32 cur_bw, delta_bw, user_bw;
372 struct rdt_resource *r_mba;
373 struct rdt_domain *dom_mba;
374 struct list_head *head;
375 struct rdtgroup *entry;
377 if (!is_mbm_local_enabled())
380 r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
381 closid = rgrp->closid;
382 rmid = rgrp->mon.rmid;
383 pmbm_data = &dom_mbm->mbm_local[rmid];
385 dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
387 pr_warn_once("Failure to get domain for MBA update\n");
391 cur_bw = pmbm_data->prev_bw;
392 user_bw = dom_mba->mbps_val[closid];
393 delta_bw = pmbm_data->delta_bw;
394 cur_msr_val = dom_mba->ctrl_val[closid];
397 * For Ctrl groups read data from child monitor groups.
399 head = &rgrp->mon.crdtgrp_list;
400 list_for_each_entry(entry, head, mon.crdtgrp_list) {
401 cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
402 cur_bw += cmbm_data->prev_bw;
403 delta_bw += cmbm_data->delta_bw;
407 * Scale up/down the bandwidth linearly for the ctrl group. The
408 * bandwidth step is the bandwidth granularity specified by the hardware.
411 * The delta_bw is used when increasing the bandwidth so that we
412 * don't alternately increase and decrease the control values continuously.
415 * For example, consider cur_bw = 90 MBps, user_bw = 100 MBps and a
416 * bandwidth step of 20 MBps (> user_bw - cur_bw); we would keep
417 * switching between 90 and 110 continuously if we only checked for cur_bw < user_bw.
420 if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
421 new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
422 } else if (cur_msr_val < MAX_MBA_BW &&
423 (user_bw > (cur_bw + delta_bw))) {
424 new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
429 cur_msr = r_mba->msr_base + closid;
430 wrmsrl(cur_msr, delay_bw_map(new_msr_val, r_mba));
431 dom_mba->ctrl_val[closid] = new_msr_val;
434 * Delta values are updated dynamically, package-wise, for each
435 * rdtgrp every time the throttle MSR changes value.
437 * This is because (1) the increase in bandwidth is not perfectly
438 * linear and only "approximately" linear even when the hardware
439 * says it is linear. (2) Also, since MBA is a core-specific
440 * mechanism, the delta values vary based on the number of cores used by the rdtgrp.
443 pmbm_data->delta_comp = true;
444 list_for_each_entry(entry, head, mon.crdtgrp_list) {
445 cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
446 cmbm_data->delta_comp = true;
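/*
 * mbm_update() reads the enabled MBM events for @rmid on domain @d so the
 * software-maintained chunk counts stay ahead of hardware counter
 * wraparound, and samples the local bandwidth for the MBA software
 * controller when that is in use.
 */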
450 static void mbm_update(struct rdt_domain *d, int rmid)
458 * This is protected from concurrent reads from user space, as both
459 * the user-space reader and this code hold the global rdtgroup_mutex.
461 if (is_mbm_total_enabled()) {
462 rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
463 __mon_event_count(rmid, &rr);
465 if (is_mbm_local_enabled()) {
466 rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
467 __mon_event_count(rmid, &rr);
470 * Call the MBA software controller only for the
471 * control groups and when the user has enabled
472 * the software controller explicitly.
475 mbm_bw_count(rmid, &rr);
480 * Handler to scan the limbo list and move to the free list those RMIDs
481 * whose occupancy has dropped below the threshold occupancy.
483 void cqm_handle_limbo(struct work_struct *work)
485 unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
486 int cpu = smp_processor_id();
487 struct rdt_resource *r;
488 struct rdt_domain *d;
490 mutex_lock(&rdtgroup_mutex);
492 r = &rdt_resources_all[RDT_RESOURCE_L3];
493 d = get_domain_from_cpu(cpu, r);
496 pr_warn_once("Failure to get domain for limbo worker\n");
500 __check_limbo(d, false);
502 if (has_busy_rmid(r, d))
503 schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);
506 mutex_unlock(&rdtgroup_mutex);
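/*
 * Schedule the limbo worker to run on a CPU of @dom after @delay_ms;
 * cqm_work_cpu records which CPU the work was placed on.
 */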
509 void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
511 unsigned long delay = msecs_to_jiffies(delay_ms);
512 struct rdt_resource *r;
515 r = &rdt_resources_all[RDT_RESOURCE_L3];
517 cpu = cpumask_any(&dom->cpu_mask);
518 dom->cqm_work_cpu = cpu;
520 schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
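/*
 * Periodic worker that updates the MBM counts of every control group and
 * of each of its child monitor groups, runs the MBA software controller
 * feedback loop where applicable, and reschedules itself every
 * MBM_OVERFLOW_INTERVAL for as long as rdt_enable_key is enabled.
 */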
523 void mbm_handle_overflow(struct work_struct *work)
525 unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
526 struct rdtgroup *prgrp, *crgrp;
527 int cpu = smp_processor_id();
528 struct list_head *head;
529 struct rdt_domain *d;
531 mutex_lock(&rdtgroup_mutex);
533 if (!static_branch_likely(&rdt_enable_key))
536 d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]);
540 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
541 mbm_update(d, prgrp->mon.rmid);
543 head = &prgrp->mon.crdtgrp_list;
544 list_for_each_entry(crgrp, head, mon.crdtgrp_list)
545 mbm_update(d, crgrp->mon.rmid);
548 update_mba_bw(prgrp, d);
551 schedule_delayed_work_on(cpu, &d->mbm_over, delay);
554 mutex_unlock(&rdtgroup_mutex);
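/*
 * Schedule the MBM overflow worker on a CPU of @dom after @delay_ms; this
 * is a no-op while rdt_enable_key is disabled.
 */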
557 void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
559 unsigned long delay = msecs_to_jiffies(delay_ms);
562 if (!static_branch_likely(&rdt_enable_key))
564 cpu = cpumask_any(&dom->cpu_mask);
565 dom->mbm_work_cpu = cpu;
566 schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
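/*
 * Allocate the rmid_entry array for the resource and seed rmid_free_lru
 * with every RMID except RMID 0, which is reserved for unmonitored tasks.
 */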
569 static int dom_data_init(struct rdt_resource *r)
571 struct rmid_entry *entry = NULL;
574 nr_rmids = r->num_rmid;
575 rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
579 for (i = 0; i < nr_rmids; i++) {
580 entry = &rmid_ptrs[i];
581 INIT_LIST_HEAD(&entry->list);
584 list_add_tail(&entry->list, &rmid_free_lru);
588 * RMID 0 is special and is always allocated. It's used for all
589 * tasks that are not monitored.
591 entry = __rmid_entry(0);
592 list_del(&entry->list);
597 static struct mon_evt llc_occupancy_event = {
598 .name = "llc_occupancy",
599 .evtid = QOS_L3_OCCUP_EVENT_ID,
602 static struct mon_evt mbm_total_event = {
603 .name = "mbm_total_bytes",
604 .evtid = QOS_L3_MBM_TOTAL_EVENT_ID,
607 static struct mon_evt mbm_local_event = {
608 .name = "mbm_local_bytes",
609 .evtid = QOS_L3_MBM_LOCAL_EVENT_ID,
613 * Initialize the event list for the resource.
615 * Note that MBM events are also part of the RDT_RESOURCE_L3 resource
616 * because, as per the SDM, the total and local memory bandwidth
617 * are enumerated as part of L3 monitoring.
619 static void l3_mon_evt_init(struct rdt_resource *r)
621 INIT_LIST_HEAD(&r->evt_list);
623 if (is_llc_occupancy_enabled())
624 list_add_tail(&llc_occupancy_event.list, &r->evt_list);
625 if (is_mbm_total_enabled())
626 list_add_tail(&mbm_total_event.list, &r->evt_list);
627 if (is_mbm_local_enabled())
628 list_add_tail(&mbm_local_event.list, &r->evt_list);
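/*
 * Read the L3 monitoring parameters enumerated by the CPU (occupancy scale
 * factor and number of RMIDs), derive intel_cqm_threshold from them,
 * initialize the RMID free list and mark the resource monitoring-capable.
 */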
631 int rdt_get_mon_l3_config(struct rdt_resource *r)
635 r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
636 r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
639 * A reasonable upper limit on the max threshold is the number
640 * of lines tagged per RMID if all RMIDs have the same number of
641 * lines tagged in the LLC.
643 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
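 * (35 MB * 1024 / 56 RMIDs ~= 640 KB per RMID; 640 KB / 35 MB ~= 1.8%.)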
645 intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;
647 /* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
648 intel_cqm_threshold /= r->mon_scale;
650 ret = dom_data_init(r);
656 r->mon_capable = true;
657 r->mon_enabled = true;