// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"
/*
 * Check whether MBA bandwidth percentage value is correct. The value is
 * checked against the minimum and maximum bandwidth values specified by the
 * hardware. The allocated bandwidth percentage is rounded up to the next
 * control step available on the hardware.
 */
static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
{
	unsigned long bw;
	int ret;

	/*
	 * Only linear delay values are supported for current Intel SKUs.
	 */
	if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
		rdt_last_cmd_puts("No support for non-linear MB domains\n");
		return false;
	}

	ret = kstrtoul(buf, 10, &bw);
	if (ret) {
		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
		return false;
	}

	if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
	    !is_mba_sc(r)) {
		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
				    r->membw.min_bw, r->default_ctrl);
		return false;
	}

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
	return true;
}
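/*
 * Illustrative walk-through of the rounding above (granularity is
 * hardware-dependent and discovered at boot): with membw.bw_gran == 10,
 * a requested bandwidth of "45" becomes roundup(45, 10) == 50, while
 * "40" already sits on a control step and is left unchanged.
 */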
int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
	     struct rdt_domain *d)
{
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	unsigned long bw_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	if (!bw_validate(data->buf, &bw_val, r))
		return -EINVAL;
	cfg->new_ctrl = bw_val;
	cfg->have_new_ctrl = true;

	return 0;
}
/*
 * Check whether a cache bit mask is valid.
 * For Intel the SDM says:
 *	Please note that all (and only) contiguous '1' combinations
 *	are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
 * Additionally Haswell requires at least two bits set.
 * AMD allows non-contiguous bitmasks.
 */
static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
	unsigned long first_bit, zero_bit, val;
	unsigned int cbm_len = r->cache.cbm_len;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
		return false;
	}

	if ((!r->cache.arch_has_empty_bitmaps && val == 0) ||
	    val > r->default_ctrl) {
		rdt_last_cmd_puts("Mask out of range\n");
		return false;
	}

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Are non-contiguous bitmasks allowed? */
	if (!r->cache.arch_has_sparse_bitmaps &&
	    (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
		return false;
	}

	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
				    r->cache.min_cbm_bits);
		return false;
	}

	*data = val;
	return true;
}
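/*
 * Worked example of the contiguity check above, assuming cbm_len == 16
 * and arch_has_sparse_bitmaps == false: for val == 0x0f0f, first_bit is
 * 0 and zero_bit is 4; find_next_bit() then finds bit 8 (< cbm_len), so
 * the mask is rejected as non-contiguous. For val == 0x00f0 no set bit
 * follows zero_bit == 8, and the zero_bit - first_bit == 4 bit span
 * satisfies min_cbm_bits (e.g. 1 on most Intel CPUs, 2 on Haswell).
 */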
/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
	      struct rdt_domain *d)
{
	struct rdtgroup *rdtgrp = data->rdtgrp;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 cbm_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	/*
	 * Cannot set up more than one pseudo-locked region in a cache
	 * hierarchy.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
		return -EINVAL;
	}

	if (!cbm_validate(data->buf, &cbm_val, r))
		return -EINVAL;

	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
		return -EINVAL;
	}

	/*
	 * The CBM may not overlap with the CBM of another closid if
	 * either is exclusive.
	 */
	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
		rdt_last_cmd_puts("Overlaps with exclusive group\n");
		return -EINVAL;
	}

	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			rdt_last_cmd_puts("Overlaps with other group\n");
			return -EINVAL;
		}
	}

	cfg->new_ctrl = cbm_val;
	cfg->have_new_ctrl = true;

	return 0;
}
/*
 * For each domain in this resource we expect to find a series of:
 *	id=mask
 * separated by ";". The "id" is in decimal, and must match one of
 * the "id"s for this resource.
 */
static int parse_line(char *line, struct resctrl_schema *s,
		      struct rdtgroup *rdtgrp)
{
	enum resctrl_conf_type t = s->conf_type;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	struct rdt_parse_data data;
	char *dom = NULL, *id;
	struct rdt_domain *d;
	unsigned long dom_id;

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    r->rid == RDT_RESOURCE_MBA) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
		return -EINVAL;
	}

next:
	if (!line || line[0] == '\0')
		return 0;
	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
		return -EINVAL;
	}
	dom = strim(dom);
	list_for_each_entry(d, &r->domains, list) {
		if (d->id == dom_id) {
			data.buf = dom;
			data.rdtgrp = rdtgrp;
			if (r->parse_ctrlval(&data, s, d))
				return -EINVAL;
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				cfg = &d->staged_config[t];
				/*
				 * In pseudo-locking setup mode we just
				 * parsed a valid CBM that should be
				 * pseudo-locked. Only one locked region
				 * per resource group and domain is
				 * allowed, so do the required
				 * initialization for a single region
				 * and return.
				 */
				rdtgrp->plr->s = s;
				rdtgrp->plr->d = d;
				rdtgrp->plr->cbm = cfg->new_ctrl;
				d->plr = rdtgrp->plr;
				return 0;
			}
			goto next;
		}
	}
	return -EINVAL;
}
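/*
 * Example input to parse_line() (domain ids and mask values are
 * illustrative):
 *
 *	"0=7ff;1=3c0"
 *
 * stages CBM 0x7ff for cache domain 0 and CBM 0x3c0 for domain 1 of the
 * schema's resource, one "id=mask" token per strsep() iteration.
 */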
static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
{
	switch (type) {
	default:
	case CDP_NONE:
		return closid;
	case CDP_CODE:
		return closid * 2 + 1;
	case CDP_DATA:
		return closid * 2;
	}
}
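/*
 * With CDP enabled, each closid owns a pair of adjacent hardware control
 * slots: closid 1, for example, maps to index 2 for CDP_DATA and index 3
 * for CDP_CODE. With CDP disabled (CDP_NONE) the index is the closid
 * itself.
 */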
static bool apply_config(struct rdt_hw_domain *hw_dom,
			 struct resctrl_staged_config *cfg, u32 idx,
			 cpumask_var_t cpu_mask, bool mba_sc)
{
	struct rdt_domain *dom = &hw_dom->d_resctrl;
	u32 *dc = !mba_sc ? hw_dom->ctrl_val : hw_dom->mbps_val;

	if (cfg->new_ctrl != dc[idx]) {
		cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
		dc[idx] = cfg->new_ctrl;

		return true;
	}

	return false;
}
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
{
	struct resctrl_staged_config *cfg;
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	enum resctrl_conf_type t;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	bool mba_sc;
	int cpu;
	u32 idx;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	mba_sc = is_mba_sc(r);
	msr_param.res = NULL;
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		for (t = 0; t < CDP_NUM_TYPES; t++) {
			cfg = &hw_dom->d_resctrl.staged_config[t];
			if (!cfg->have_new_ctrl)
				continue;

			idx = get_config_index(closid, t);
			if (!apply_config(hw_dom, cfg, idx, cpu_mask, mba_sc))
				continue;

			if (!msr_param.res) {
				msr_param.low = idx;
				msr_param.high = msr_param.low + 1;
				msr_param.res = r;
			} else {
				msr_param.low = min(msr_param.low, idx);
				msr_param.high = max(msr_param.high, idx + 1);
			}
		}
	}

	/*
	 * Avoid writing the control msr with control values when
	 * MBA software controller is enabled.
	 */
	if (cpumask_empty(cpu_mask) || mba_sc)
		goto done;
	cpu = get_cpu();
	/* Update resource control msr on this CPU if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update resource control msr on other CPUs. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

done:
	free_cpumask_var(cpu_mask);

	return 0;
}
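/*
 * The msr_param bookkeeping above collapses all staged changes into one
 * contiguous index range. Illustrative: if apply_config() changed
 * indices 2 and 5 only, msr_param spans [low=2, high=6) and
 * rdt_ctrl_update() rewrites that whole range, including the unchanged
 * indices in between, on one CPU of each affected domain.
 */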
static int rdtgroup_parse_resource(char *resname, char *tok,
				   struct rdtgroup *rdtgrp)
{
	struct resctrl_schema *s;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
			return parse_line(tok, s, rdtgrp);
	}
	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
	return -EINVAL;
}
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct resctrl_schema *s;
	struct rdtgroup *rdtgrp;
	struct rdt_domain *dom;
	struct rdt_resource *r;
	char *tok, *resname;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	cpus_read_lock();
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		cpus_read_unlock();
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	/*
	 * No changes to pseudo-locked region allowed. It has to be removed
	 * and re-created instead.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
		goto out;
	}

	list_for_each_entry(s, &resctrl_schema_all, list) {
		list_for_each_entry(dom, &s->res->domains, list)
			memset(dom->staged_config, 0, sizeof(dom->staged_config));
	}

	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
		if (!tok) {
			rdt_last_cmd_puts("Missing ':'\n");
			ret = -EINVAL;
			goto out;
		}
		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);
			ret = -EINVAL;
			goto out;
		}
		ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
		if (ret)
			goto out;
	}

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret)
			goto out;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * If pseudo-locking fails we keep the resource group in
		 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
		 * active and updated for just the domain the pseudo-locked
		 * region was requested for.
		 */
		ret = rdtgroup_pseudo_lock_create(rdtgrp);
	}

out:
	rdtgroup_kn_unlock(of->kn);
	cpus_read_unlock();
	return ret ?: nbytes;
}
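/*
 * Illustrative usage from user space (resource names, domain ids and the
 * group name are system-specific):
 *
 *	# echo "L3:0=ffff;1=00ff" > /sys/fs/resctrl/grp1/schemata
 *	# echo "MB:0=50;1=100" > /sys/fs/resctrl/grp1/schemata
 *
 * Each line is split at ':' into a resource name and its domain list,
 * handed to rdtgroup_parse_resource(), and the staged values are then
 * committed with resctrl_arch_update_domains().
 */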
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type type)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	u32 idx = get_config_index(closid, type);

	if (!is_mba_sc(r))
		return hw_dom->ctrl_val[idx];
	return hw_dom->mbps_val[idx];
}
static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
{
	struct rdt_resource *r = schema->res;
	struct rdt_domain *dom;
	bool sep = false;
	u32 ctrl_val;

	seq_printf(s, "%*s:", max_name_width, schema->name);
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_puts(s, ";");

		ctrl_val = resctrl_arch_get_config(r, dom, closid,
						   schema->conf_type);
		seq_printf(s, r->format_str, dom->id, max_data_width,
			   ctrl_val);
		sep = true;
	}
	seq_puts(s, "\n");
}
int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	struct rdtgroup *rdtgrp;
	int ret = 0;
	u32 closid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				seq_printf(s, "%s:uninitialized\n", schema->name);
			}
		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				seq_printf(s, "%s:%d=%x\n",
					   rdtgrp->plr->s->res->name,
					   rdtgrp->plr->d->id,
					   rdtgrp->plr->cbm);
			}
		} else {
			closid = rdtgrp->closid;
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				if (closid < schema->num_closid)
					show_doms(s, schema, closid);
			}
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);
	return ret;
}
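/*
 * Sample output of reading the schemata file of an ordinary group
 * (values are illustrative; pseudo-locksetup groups instead show
 * "uninitialized" for every schema):
 *
 *	L3:0=ffff;1=ffff
 *	MB:0=100;1=100
 */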
void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_domain *d, struct rdtgroup *rdtgrp,
		    int evtid, int first)
{
	/*
	 * Setup the parameters to send to the IPI to read the data.
	 */
	rr->rgrp = rdtgrp;
	rr->evtid = evtid;
	rr->r = r;
	rr->d = d;
	rr->val = 0;
	rr->first = first;

	smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
}
int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rdt_hw_resource *hw_res;
	u32 resid, evtid, domid;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	union mon_data_bits md;
	struct rdt_domain *d;
	struct rmid_read rr;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto out;
	}

	md.priv = of->kn->priv;
	resid = md.u.rid;
	domid = md.u.domid;
	evtid = md.u.evtid;

	hw_res = &rdt_resources_all[resid];
	r = &hw_res->r_resctrl;
	d = rdt_find_domain(r, domid, NULL);
	if (IS_ERR_OR_NULL(d)) {
		ret = -ENOENT;
		goto out;
	}

	mon_event_read(&rr, r, d, rdtgrp, evtid, false);

	if (rr.val & RMID_VAL_ERROR)
		seq_puts(m, "Error\n");
	else if (rr.val & RMID_VAL_UNAVAIL)
		seq_puts(m, "Unavailable\n");
	else
		seq_printf(m, "%llu\n", rr.val * hw_res->mon_scale);

out:
	rdtgroup_kn_unlock(of->kn);
	return ret;
}
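/*
 * A read of a mon_data file, e.g. mon_data/mon_L3_00/llc_occupancy
 * (event and domain names vary by system), prints either a single
 * counter value scaled by hw_res->mon_scale, such as "4587520", or
 * "Error" / "Unavailable" when the RMID read failed or the hardware
 * reported no data. (Example path and value are illustrative.)
 */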