2 * Intel CPU Microcode Update Driver for Linux
4 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5 * 2006 Shaohua Li <shaohua.li@intel.com>
7 * Intel CPU microcode early update for Linux
9 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
* H Peter Anvin <hpa@zytor.com>
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
19 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
20 * printk calls into no_printk().
24 #define pr_fmt(fmt) "microcode: " fmt
26 #include <linux/earlycpio.h>
27 #include <linux/firmware.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/initrd.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/cpu.h>
36 #include <asm/microcode_intel.h>
37 #include <asm/processor.h>
38 #include <asm/tlbflush.h>
39 #include <asm/setup.h>
/* last level cache size per core */
static int llc_size_per_core;

/*
 * Offsets (relative to the initrd start) of microcode patches discovered in
 * the initrd image; converted back to pointers via copy_initrd_ptrs().
 */
static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];

/* Patches saved in kernel memory for early loading on APs / CPU hotplug / resume. */
static struct mc_saved_data {
        unsigned int mc_saved_count;            /* number of valid entries in mc_saved */
        struct microcode_intel **mc_saved;      /* array of saved patch pointers */
/*
 * Scan the @num_saved patches in @saved and install the newest one that
 * beats the CPU's current revision into @uci->mc.
 * NOTE(review): this excerpt is elided; several statements (loop body,
 * braces, return) are missing from this view.
 */
static enum ucode_state
load_microcode_early(struct microcode_intel **saved,
                     unsigned int num_saved, struct ucode_cpu_info *uci)

        struct microcode_intel *ucode_ptr, *new_mc = NULL;
        struct microcode_header_intel *mc_hdr;

        /* Start from the revision already running on this CPU. */
        new_rev = uci->cpu_sig.rev;

        for (i = 0; i < num_saved; i++) {
                mc_hdr = (struct microcode_header_intel *)ucode_ptr;

                /* has_newer_microcode(): presumably sig/pf match + rev > new_rev — confirm. */
                ret = has_newer_microcode(ucode_ptr,

                new_rev = mc_hdr->rev;

        /* Winner (or NULL if none found) becomes the per-CPU patch. */
        uci->mc = (struct microcode_intel *)new_mc;
/*
 * Rebase the @num_saved initrd-relative offsets in @initrd[] by @off and
 * store the resulting patch pointers into @mc_saved[].
 */
copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd,
                 unsigned long off, int num_saved)

        for (i = 0; i < num_saved; i++)
                mc_saved[i] = (struct microcode_intel *)(initrd[i] + off);
/*
 * Fill @mc_saved_tmp with the physical addresses of the patches referenced
 * by @mc_saved_data.  Every access goes through __pa_nodebug() because this
 * runs before the normal virtual mappings are usable.
 */
microcode_phys(struct microcode_intel **mc_saved_tmp,
               struct mc_saved_data *mc_saved_data)

        struct microcode_intel ***mc_saved;

        mc_saved = (struct microcode_intel ***)
                __pa_nodebug(&mc_saved_data->mc_saved);
        for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
                struct microcode_intel *p;

                /* Read the i-th saved pointer through its physical address... */
                p = *(struct microcode_intel **)
                        __pa_nodebug(mc_saved_data->mc_saved + i);
                /* ...then convert the patch pointer itself to a physical address. */
                mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
/*
 * Load the best saved patch into @uci, choosing the pointer source:
 * initrd offsets (not yet copied to kernel memory), physical addresses
 * (early 32-bit path, presumably), or the saved virtual pointers.
 * NOTE(review): the multiple returns suggest elided #ifdef branches.
 */
static enum ucode_state
load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
               unsigned long initrd_start, struct ucode_cpu_info *uci)

        struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
        unsigned int count = mc_saved_data->mc_saved_count;

        if (!mc_saved_data->mc_saved) {
                /* Patches still live in the initrd: rebase the stored offsets. */
                copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count);

                return load_microcode_early(mc_saved_tmp, count, uci);

        /* Physical-address variant of the saved pointer array. */
        microcode_phys(mc_saved_tmp, mc_saved_data);
        return load_microcode_early(mc_saved_tmp, count, uci);

        return load_microcode_early(mc_saved_data->mc_saved,
/*
 * Deep-copy @mc_saved_count patches from @mc_saved_src into freshly
 * allocated kernel memory and point @mc_saved_data at the copies.
 * The trailing loop is the (elided) error path freeing partial copies.
 */
save_microcode(struct mc_saved_data *mc_saved_data,
               struct microcode_intel **mc_saved_src,
               unsigned int mc_saved_count)

        struct microcode_intel **saved_ptr;

        /*
         * Copy new microcode data.
         */
        saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL);

        for (i = 0; i < mc_saved_count; i++) {
                struct microcode_header_intel *mc_hdr;
                struct microcode_intel *mc;

                /* A NULL source slot is an error (handling elided here). */
                if (!mc_saved_src[i]) {

                mc = mc_saved_src[i];

                size = get_totalsize(mc_hdr);   /* whole patch incl. header/ext sigs */

                saved_ptr[i] = kmalloc(size, GFP_KERNEL);

                memcpy(saved_ptr[i], mc, size);

        /*
         * Point to newly saved microcode.
         */
        mc_saved_data->mc_saved = saved_ptr;
        mc_saved_data->mc_saved_count = mc_saved_count;

        /* Error path: free everything allocated so far (body elided). */
        for (j = 0; j <= i; j++)
/*
 * A microcode patch in ucode_ptr is saved into mc_saved
 * - if it has matching signature and newer revision compared to an existing
 *   saved patch,
 * - or if it is a newly discovered microcode patch.
 *
 * The microcode patch should have matching model with CPU.
 *
 * Returns: The updated number @num_saved of saved microcode patches.
 */
static unsigned int _save_mc(struct microcode_intel **mc_saved,
                             u8 *ucode_ptr, unsigned int num_saved)

        struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
        unsigned int sig, pf;

        mc_hdr = (struct microcode_header_intel *)ucode_ptr;

        for (i = 0; i < num_saved; i++) {
                mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
                sig = mc_saved_hdr->sig;
                pf = mc_saved_hdr->pf;

                /* Different sig/pf: not a replacement candidate for this slot. */
                if (!find_matching_signature(ucode_ptr, sig, pf))

                /* Same signature but not strictly newer: keep the existing one. */
                if (mc_hdr->rev <= mc_saved_hdr->rev)

                /*
                 * Found an older ucode saved earlier. Replace it with
                 * this newer one.
                 */
                mc_saved[i] = (struct microcode_intel *)ucode_ptr;

        /* Newly detected microcode, save it to memory. */
        if (i >= num_saved && !found)
                mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;
/*
 * Get microcode matching with BSP's model. Only CPUs with the same model as
 * BSP can stay in the platform.
 *
 * Walks the container at @data (@size bytes), saves every patch matching the
 * BSP's signature via _save_mc(), then records each saved patch's offset
 * relative to @start into @mc_saved_in_initrd.
 */
static enum ucode_state __init
get_matching_model_microcode(int cpu, unsigned long start,
                             void *data, size_t size,
                             struct mc_saved_data *mc_saved_data,
                             unsigned long *mc_saved_in_initrd,
                             struct ucode_cpu_info *uci)

        u8 *ucode_ptr = data;
        unsigned int leftover = size;
        enum ucode_state state = UCODE_OK;
        unsigned int mc_size;
        struct microcode_header_intel *mc_header;
        struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
        unsigned int mc_saved_count = mc_saved_data->mc_saved_count;

        while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {

                if (leftover < sizeof(mc_header))

                mc_header = (struct microcode_header_intel *)ucode_ptr;

                mc_size = get_totalsize(mc_header);
                /* Reject empty, truncated or structurally invalid patches. */
                if (!mc_size || mc_size > leftover ||
                    microcode_sanity_check(ucode_ptr, 0) < 0)

                /*
                 * Since APs with same family and model as the BSP may boot in
                 * the platform, we need to find and save microcode patches
                 * with the same family and model as the BSP.
                 */
                if (!find_matching_signature(mc_header, uci->cpu_sig.sig,
                        ucode_ptr += mc_size;   /* skip non-matching patch */

                mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count);

                ucode_ptr += mc_size;

        if (mc_saved_count == 0) {
                state = UCODE_NFOUND;

        /* Remember initrd-relative offsets so they survive relocation. */
        for (i = 0; i < mc_saved_count; i++)
                mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start;

        mc_saved_data->mc_saved_count = mc_saved_count;
/*
 * Collect signature, processor flags and current microcode revision of the
 * running CPU into @uci, using the native_* register accessors.
 */
static int collect_cpu_info_early(struct ucode_cpu_info *uci)

        unsigned int family, model;
        struct cpu_signature csig;
        unsigned int eax, ebx, ecx, edx;

        memset(uci, 0, sizeof(*uci));

        native_cpuid(&eax, &ebx, &ecx, &edx);

        family = __x86_family(csig.sig);
        model = x86_model(csig.sig);

        /* Platform ID MSR exists only on family 6 model >= 5 and later. */
        if ((model >= 5) || (family > 6)) {
                /* get processor flags from MSR 0x17 */
                native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
                csig.pf = 1 << ((val[1] >> 18) & 7);

        csig.rev = intel_get_microcode_revision();
/*
 * Dump (via pr_debug) the CPU's signature and every patch currently held in
 * mc_saved_data, including any extended signature table entries.
 * NOTE(review): "toal size" in the format string below is a typo ("total");
 * it is a runtime string, left untouched in this documentation pass.
 */
static void show_saved_mc(void)

        unsigned int sig, pf, rev, total_size, data_size, date;
        struct ucode_cpu_info uci;

        if (mc_saved_data.mc_saved_count == 0) {
                pr_debug("no microcode data saved.\n");

        pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count);

        collect_cpu_info_early(&uci);

        sig = uci.cpu_sig.sig;
        rev = uci.cpu_sig.rev;
        pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

        for (i = 0; i < mc_saved_data.mc_saved_count; i++) {
                struct microcode_header_intel *mc_saved_header;
                struct extended_sigtable *ext_header;
                struct extended_signature *ext_sig;

                mc_saved_header = (struct microcode_header_intel *)
                        mc_saved_data.mc_saved[i];
                sig = mc_saved_header->sig;
                pf = mc_saved_header->pf;
                rev = mc_saved_header->rev;
                total_size = get_totalsize(mc_saved_header);
                data_size = get_datasize(mc_saved_header);
                date = mc_saved_header->date;

                pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, toal size=0x%x, date = %04x-%02x-%02x\n",
                         i, sig, pf, rev, total_size,
                         (date >> 16) & 0xff);

                /* Look for ext. headers: */
                if (total_size <= data_size + MC_HEADER_SIZE)

                /* Extended sigtable sits right after the main data block. */
                ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
                ext_sigcount = ext_header->count;
                ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

                for (j = 0; j < ext_sigcount; j++) {

                        pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_MUTEX(x86_cpu_microcode_mutex);

/*
 * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
 * hot added or resumes.
 *
 * Please make sure this mc should be a valid microcode patch before calling
 * this function.
 */
int save_mc_for_early(u8 *mc)

        struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
        unsigned int mc_saved_count_init;
        unsigned int mc_saved_count;
        struct microcode_intel **mc_saved;

        /*
         * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
         * hotplug.
         */
        mutex_lock(&x86_cpu_microcode_mutex);

        mc_saved_count_init = mc_saved_data.mc_saved_count;
        mc_saved_count = mc_saved_data.mc_saved_count;
        mc_saved = mc_saved_data.mc_saved;

        /* Work on a local snapshot of the currently saved pointers. */
        if (mc_saved && mc_saved_count)
                memcpy(mc_saved_tmp, mc_saved,
                       mc_saved_count * sizeof(struct microcode_intel *));

        /*
         * Save the microcode patch mc in mc_save_tmp structure if it's a newer
         * revision.
         */
        mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count);

        /*
         * Save the mc_save_tmp in global mc_saved_data.
         */
        ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count);

        pr_err("Cannot save microcode patch.\n");

        /*
         * Free old saved microcode data.
         */
        for (i = 0; i < mc_saved_count_init; i++)

        mutex_unlock(&x86_cpu_microcode_mutex);

EXPORT_SYMBOL_GPL(save_mc_for_early);
/*
 * Try to locate a built-in (compiled-into-the-kernel) microcode blob named
 * after the boot CPU's family/model/stepping.  Returns true on success with
 * @cp describing the blob.  (Firmware name deblobbed in this tree.)
 */
static bool __init load_builtin_intel_microcode(struct cpio_data *cp)

        unsigned int eax = 0x00000001, ebx, ecx, edx;
        unsigned int family, model, stepping;

        native_cpuid(&eax, &ebx, &ecx, &edx);

        family = __x86_family(eax);
        model = x86_model(eax);
        stepping = eax & 0xf;   /* stepping is the low nibble of CPUID.1 EAX */

        sprintf(name, "/*(DEBLOBBED)*/", family, model, stepping);

        return get_builtin_firmware(cp, name);
/* Path of the microcode cpio member inside the initrd (deblobbed here). */
static __initdata char ucode_name[] = "/*(DEBLOBBED)*/";

/*
 * Find a microcode container — in the initrd cpio at [@start, @start+@size)
 * or, failing that, built into the kernel — and save the patches matching
 * this CPU via get_matching_model_microcode().
 */
static __init enum ucode_state
scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
               unsigned long start, unsigned long size,
               struct ucode_cpu_info *uci)

        /* 32-bit early path accesses the name through its physical address. */
        char *p = (char *)__pa_nodebug(ucode_name);

        char *p = ucode_name;

        /* try built-in microcode if no initrd */

        if (!load_builtin_intel_microcode(&cd))

        cd = find_cpio_data(p, (void *)start, size, &offset);

        return get_matching_model_microcode(0, start, cd.data, cd.size,
                                            mc_saved_data, initrd, uci);
/*
 * Print ucode update info.
 * @date is packed per the Intel header layout: year in bits 15:0,
 * month in bits 31:24, day in bits 23:16 (see apply_microcode_intel()).
 */
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)

        int cpu = smp_processor_id();

        pr_info("CPU%d microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
                (date >> 16) & 0xff);
/* Set while an early update happened before printk worked; see print_ucode(). */
static int delay_ucode_info;
/* Date of the patch applied early, stashed for the delayed printout. */
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)

        struct ucode_cpu_info uci;

        if (delay_ucode_info) {
                collect_cpu_info_early(&uci);
                print_ucode_info(&uci, current_mc_date);
                delay_ucode_info = 0;   /* one-shot: clear after reporting */
563 static void print_ucode(struct ucode_cpu_info *uci)
565 struct microcode_intel *mc_intel;
566 int *delay_ucode_info_p;
567 int *current_mc_date_p;
570 if (mc_intel == NULL)
573 delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
574 current_mc_date_p = (int *)__pa_nodebug(¤t_mc_date);
576 *delay_ucode_info_p = 1;
577 *current_mc_date_p = mc_intel->hdr.date;
/*
 * Flush global tlb. We only do this in x86_64 where paging has been enabled
 * already and PGE should be enabled as well.
 */
static inline void flush_tlb_early(void)

        __native_flush_tlb_global_irq_disabled();
/*
 * Variant of print_ucode() for the build where printk() already works
 * (presumably the !CONFIG_X86_32 branch — the #else is elided in this view):
 * print immediately instead of deferring.
 */
static inline void print_ucode(struct ucode_cpu_info *uci)

        struct microcode_intel *mc_intel;

        if (mc_intel == NULL)

        print_ucode_info(uci, mc_intel->hdr.date);
/*
 * Write the patch in @uci->mc to the CPU via MSR 0x79 and verify the new
 * revision.  @early selects the deferred-print path (printk not up yet).
 */
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)

        struct microcode_intel *mc_intel;

        if (mc_intel == NULL)

        /*
         * Save us the MSR write below - which is a particular expensive
         * operation - when the other hyperthread has updated the microcode
         * already.
         */
        rev = intel_get_microcode_revision();
        if (rev >= mc_intel->hdr.rev) {
                uci->cpu_sig.rev = rev;

        /* write microcode via MSR 0x79 */
        native_wrmsr(MSR_IA32_UCODE_WRITE,
                     (unsigned long) mc_intel->bits,
                     (unsigned long) mc_intel->bits >> 16 >> 16);

        /* Re-read the revision to confirm the CPU accepted the update. */
        rev = intel_get_microcode_revision();
        if (rev != mc_intel->hdr.rev)

        /* Flush global tlb. This is precaution. */

        uci->cpu_sig.rev = rev;

        print_ucode_info(uci, mc_intel->hdr.date);
/*
 * This function converts microcode patch offsets previously stored in
 * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data.
 */
int __init save_microcode_in_initrd_intel(void)

        unsigned int count = mc_saved_data.mc_saved_count;
        struct microcode_intel *mc_saved[MAX_UCODE_COUNT];

        copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, get_initrd_start(), count);
        /* Deep-copy out of the initrd before it is freed. */
        ret = save_microcode(&mc_saved_data, mc_saved, count);

        pr_err("Cannot save microcode patches from initrd.\n");
/*
 * BSP early-load worker: identify the CPU, scan the initrd/built-in blobs
 * for matching patches, then load and apply the best one.
 */
_load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
                      unsigned long *initrd,
                      unsigned long start, unsigned long size)

        struct ucode_cpu_info uci;
        enum ucode_state ret;

        collect_cpu_info_early(&uci);

        ret = scan_microcode(mc_saved_data, initrd, start, size, &uci);

        ret = load_microcode(mc_saved_data, initrd, start, &uci);

        apply_microcode_early(&uci, true);
/*
 * Early BSP entry point.  Both the 32-bit path (physical addresses via
 * __pa_nodebug, paging not yet up) and the 64-bit path (virtual addresses,
 * PAGE_OFFSET applied) are visible here; the #ifdef lines are elided.
 */
void __init load_ucode_intel_bsp(void)

        struct boot_params *p;

        p = (struct boot_params *)__pa_nodebug(&boot_params);
        size = p->hdr.ramdisk_size;

        /*
         * Set start only if we have an initrd image. We cannot use initrd_start
         * because it is not set that early yet.
         */
        start = (size ? p->hdr.ramdisk_image : 0);

        _load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
                              (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),

        size = boot_params.hdr.ramdisk_size;
        start = (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);

        _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
/*
 * Early AP entry point: apply the microcode already saved by the BSP.
 * The two pointer setups (physical vs. virtual) correspond to elided
 * 32-bit / 64-bit #ifdef branches.
 */
void load_ucode_intel_ap(void)

        struct mc_saved_data *mc_saved_data_p;
        struct ucode_cpu_info uci;
        unsigned long *mc_saved_in_initrd_p;
        enum ucode_state ret;

        mc_saved_in_initrd_p = (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
        mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);

        mc_saved_in_initrd_p = mc_saved_in_initrd;
        mc_saved_data_p = &mc_saved_data;

        /*
         * If there is no valid ucode previously saved in memory, no need to
         * update ucode on this AP.
         */
        if (mc_saved_data_p->mc_saved_count == 0)

        collect_cpu_info_early(&uci);
        ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
                             get_initrd_start_addr(), &uci);

        apply_microcode_early(&uci, true);
/*
 * Re-apply saved microcode, e.g. after resume; printk works here, so the
 * non-early (early=false) apply path is used.
 */
void reload_ucode_intel(void)

        struct ucode_cpu_info uci;
        enum ucode_state ret;

        /* Nothing saved: nothing to reload. */
        if (!mc_saved_data.mc_saved_count)

        collect_cpu_info_early(&uci);

        ret = load_microcode_early(mc_saved_data.mc_saved,
                                   mc_saved_data.mc_saved_count, &uci);

        apply_microcode_early(&uci, false);
/*
 * driver-ops variant of collect_cpu_info_early(): fill @csig for @cpu_num
 * from cpu_data and CPUID/MSR reads, and log the result.
 */
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)

        struct cpuinfo_x86 *c = &cpu_data(cpu_num);

        memset(csig, 0, sizeof(*csig));

        csig->sig = cpuid_eax(0x00000001);

        /* Platform ID MSR exists only on family 6 model >= 5 and later. */
        if ((c->x86_model >= 5) || (c->x86 > 6)) {
                /* get processor flags from MSR 0x17 */
                rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
                csig->pf = 1 << ((val[1] >> 18) & 7);

        csig->rev = c->microcode;

        pr_info("CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n",
                cpu_num, csig->sig, csig->pf, csig->rev);
/*
 * Check whether @mc_intel is applicable to and newer than what @cpu runs.
 * return 0 - no update found
 * return 1 - found update
 */
static int get_matching_mc(struct microcode_intel *mc_intel, int cpu)

        struct cpu_signature cpu_sig;
        unsigned int csig, cpf, crev;

        collect_cpu_info(cpu, &cpu_sig);

        return has_newer_microcode(mc_intel, csig, cpf, crev);
/*
 * Late-loading apply path: write uci->mc to MSR 0x79 on @cpu (the caller
 * must have bound the task to that CPU), verify the new revision, and
 * update the cached revision in cpu_data/boot_cpu_data.
 */
static int apply_microcode_intel(int cpu)

        struct microcode_intel *mc_intel;
        struct ucode_cpu_info *uci;

        int cpu_num = raw_smp_processor_id();
        struct cpuinfo_x86 *c = &cpu_data(cpu_num);

        uci = ucode_cpu_info + cpu;

        /* We should bind the task to the CPU */
        BUG_ON(cpu_num != cpu);

        if (mc_intel == NULL)

        /*
         * Microcode on this CPU could be updated earlier. Only apply the
         * microcode patch in mc_intel when it is newer than the one on this
         * CPU.
         */
        if (get_matching_mc(mc_intel, cpu) == 0)

        /*
         * Save us the MSR write below - which is a particular expensive
         * operation - when the other hyperthread has updated the microcode
         * already.
         */
        rev = intel_get_microcode_revision();
        if (rev >= mc_intel->hdr.rev)

        /* write microcode via MSR 0x79 */
        wrmsr(MSR_IA32_UCODE_WRITE,
              (unsigned long) mc_intel->bits,
              (unsigned long) mc_intel->bits >> 16 >> 16);

        rev = intel_get_microcode_revision();

        if (rev != mc_intel->hdr.rev) {
                pr_err("CPU%d update to revision 0x%x failed\n",
                       cpu_num, mc_intel->hdr.rev);

        /* date layout: year = bits 15:0, month = bits 31:24, day = bits 23:16 */
        pr_info("CPU%d updated to revision 0x%x, date = %04x-%02x-%02x\n",
                mc_intel->hdr.date & 0xffff,
                mc_intel->hdr.date >> 24,
                (mc_intel->hdr.date >> 16) & 0xff);

        uci->cpu_sig.rev = rev;

        /* Update boot_cpu_data's revision too, if we're on the BSP: */
        if (c->cpu_index == boot_cpu_data.cpu_index)
                boot_cpu_data.microcode = rev;
/*
 * Parse a microcode container of @size bytes at @data, copying each patch
 * through @get_ucode_data (memcpy for firmware, copy_from_user for the
 * char-dev path), validating it, and keeping the newest applicable one in
 * uci->mc.  The winner is also saved for early loading on hotplug/resume.
 */
static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
                                int (*get_ucode_data)(void *, const void *, size_t))

        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
        int new_rev = uci->cpu_sig.rev;
        unsigned int leftover = size;
        enum ucode_state state = UCODE_OK;
        unsigned int curr_mc_size = 0;
        unsigned int csig, cpf;

        struct microcode_header_intel mc_header;
        unsigned int mc_size;

        if (leftover < sizeof(mc_header)) {
                pr_err("error! Truncated header in microcode data file\n");

        if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))

        mc_size = get_totalsize(&mc_header);
        if (!mc_size || mc_size > leftover) {
                pr_err("error! Bad data in microcode data file\n");

        /* For performance reasons, reuse mc area when possible */
        if (!mc || mc_size > curr_mc_size) {
                mc = vmalloc(mc_size);

                curr_mc_size = mc_size;

        if (get_ucode_data(mc, ucode_ptr, mc_size) ||
            microcode_sanity_check(mc, 1) < 0) {

        csig = uci->cpu_sig.sig;
        cpf = uci->cpu_sig.pf;
        if (has_newer_microcode(mc, csig, cpf, new_rev)) {

                new_rev = mc_header.rev;

                mc = NULL;      /* trigger new vmalloc */

        ucode_ptr += mc_size;

        state = UCODE_NFOUND;

        uci->mc = (struct microcode_intel *)new_mc;

        /*
         * If early loading microcode is supported, save this mc into
         * permanent memory. So it will be loaded early when a CPU is hot added
         * or resumes.
         */
        save_mc_for_early(new_mc);

        pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
                 cpu, new_rev, uci->cpu_sig.rev);
953 static int get_ucode_fw(void *to, const void *from, size_t n)
/*
 * Refuse late loading on configurations known to hang (Broadwell-EX erratum
 * BDF90); such systems must use early loading or a BIOS update instead.
 */
static bool is_blacklisted(unsigned int cpu)

        struct cpuinfo_x86 *c = &cpu_data(cpu);

        /*
         * Late loading on model 79 with microcode revision less than 0x0b000021
         * and LLC size per core bigger than 2.5MB may result in a system hang.
         * This behavior is documented in item BDF90, #334165 (Intel Xeon
         * Processor E7-8800/4800 v4 Product Family).
         */
        c->x86_model == 79 &&
        c->x86_stepping == 0x01 &&
        llc_size_per_core > 2621440 &&                  /* 2.5 MB */
        c->microcode < 0x0b000021) {
                pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
                pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
/*
 * microcode_ops hook: request the per-family/model/stepping firmware file
 * (name deblobbed here) and feed it to generic_load_microcode().
 */
static enum ucode_state request_microcode_fw(int cpu, struct device *device,

        struct cpuinfo_x86 *c = &cpu_data(cpu);
        const struct firmware *firmware;
        enum ucode_state ret;

        /* See is_blacklisted(): erratum BDF90 systems get no late loading. */
        if (is_blacklisted(cpu))

        sprintf(name, "/*(DEBLOBBED)*/",
                c->x86, c->x86_model, c->x86_stepping);

        if (reject_firmware_direct(&firmware, name, device)) {
                pr_debug("data file %s load failed\n", name);

        ret = generic_load_microcode(cpu, (void *)firmware->data,
                                     firmware->size, &get_ucode_fw);

        release_firmware(firmware);
/* Copy helper for the char-dev path: source buffer lives in userspace. */
static int get_ucode_user(void *to, const void *from, size_t n)

        return copy_from_user(to, from, n);
/*
 * microcode_ops hook: load a container supplied by userspace through the
 * microcode device node.
 */
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)

        if (is_blacklisted(cpu))
                return UCODE_NFOUND;

        return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
/* microcode_ops hook: drop per-CPU microcode state (body elided in excerpt). */
static void microcode_fini_cpu(int cpu)

        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
/* Vendor ops table handed to the core driver by init_intel_microcode(). */
static struct microcode_ops microcode_intel_ops = {
        .request_microcode_user           = request_microcode_user,
        .request_microcode_fw             = request_microcode_fw,
        .collect_cpu_info                 = collect_cpu_info,
        .apply_microcode                  = apply_microcode_intel,
        .microcode_fini_cpu               = microcode_fini_cpu,
/*
 * Last-level cache size in bytes divided by the number of cores sharing it;
 * used by is_blacklisted() for the BDF90 erratum check.
 */
static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)

        u64 llc_size = c->x86_cache_size * 1024ULL;     /* x86_cache_size is in KB */

        do_div(llc_size, c->x86_max_cores);

        return (int)llc_size;
1048 struct microcode_ops * __init init_intel_microcode(void)
1050 struct cpuinfo_x86 *c = &boot_cpu_data;
1052 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
1053 cpu_has(c, X86_FEATURE_IA64)) {
1054 pr_err("Intel CPU family 0x%x not supported\n", c->x86);
1058 llc_size_per_core = calc_llc_size_per_core(c);
1060 return µcode_intel_ops;