1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   *  AMD CPU Microcode Update Driver for Linux
4   *
5   *  This driver allows to upgrade microcode on F10h AMD
6   *  CPUs and later.
7   *
8   *  Copyright (C) 2008-2011 Advanced Micro Devices Inc.
9   *	          2013-2018 Borislav Petkov <bp@alien8.de>
10   *
11   *  Author: Peter Oruba <peter.oruba@amd.com>
12   *
13   *  Based on work by:
14   *  Tigran Aivazian <aivazian.tigran@gmail.com>
15   *
16   *  early loader:
17   *  Copyright (C) 2013 Advanced Micro Devices, Inc.
18   *
19   *  Author: Jacob Shin <jacob.shin@amd.com>
20   *  Fixes: Borislav Petkov <bp@suse.de>
21   */
22  #define pr_fmt(fmt) "microcode: " fmt
23  
24  #include <linux/earlycpio.h>
25  #include <linux/firmware.h>
26  #include <linux/uaccess.h>
27  #include <linux/vmalloc.h>
28  #include <linux/initrd.h>
29  #include <linux/kernel.h>
30  #include <linux/pci.h>
31  
32  #include <asm/microcode.h>
33  #include <asm/processor.h>
34  #include <asm/setup.h>
35  #include <asm/cpu.h>
36  #include <asm/msr.h>
37  
38  #include "internal.h"
39  
/*
 * One cached microcode patch: an entry in @microcode_cache holding the raw
 * patch bytes plus the keys used to match it to a CPU.
 */
struct ucode_patch {
	struct list_head plist;		/* link into microcode_cache */
	void *data;			/* raw patch image (kmemdup'd) */
	unsigned int size;		/* size of @data in bytes */
	u32 patch_id;			/* patch revision from the header */
	u16 equiv_cpu;			/* equivalence ID (used pre-Zen) */
};

/* Cache of all patches parsed from the container file(s). */
static LIST_HEAD(microcode_cache);

/* Magic value at the start of an AMD microcode container file. */
#define UCODE_MAGIC			0x00414d44
/* Section type values inside a container: */
#define UCODE_EQUIV_CPU_TABLE_TYPE	0x00000000
#define UCODE_UCODE_TYPE		0x00000001

/* On-disk header sizes, in bytes: */
#define SECTION_HDR_SIZE		8
#define CONTAINER_HDR_SZ		12
56  
/* One entry of the on-disk CPU equivalence table. */
struct equiv_cpu_entry {
	u32	installed_cpu;		/* CPUID(1).EAX this entry applies to */
	u32	fixed_errata_mask;
	u32	fixed_errata_compare;
	u16	equiv_cpu;		/* equivalence ID matched against patches */
	u16	res;			/* reserved */
} __packed;
64  
/* On-disk header of a single microcode patch. */
struct microcode_header_amd {
	u32	data_code;
	u32	patch_id;		/* patch revision */
	u16	mc_patch_data_id;
	u8	mc_patch_data_len;
	u8	init_flag;
	u32	mc_patch_data_checksum;
	u32	nb_dev_id;		/* non-zero: chipset-specific, unsupported */
	u32	sb_dev_id;		/* non-zero: chipset-specific, unsupported */
	u16	processor_rev_id;	/* matched against the equivalence ID */
	u8	nb_rev_id;
	u8	sb_rev_id;
	u8	bios_api_rev;
	u8	reserved1[3];
	u32	match_reg[8];
} __packed;
81  
/* A complete patch: header followed by the variable-length patch body. */
struct microcode_amd {
	struct microcode_header_amd	hdr;
	unsigned int			mpb[];
};

/* In-kernel copy of the container's equivalence table (pre-Zen only). */
static struct equiv_cpu_table {
	unsigned int num_entries;
	struct equiv_cpu_entry *entry;
} equiv_table;
91  
/*
 * Zen and newer encode the target CPU's family/model/stepping directly in
 * the patch revision; this union gives bitfield access to those pieces.
 */
union zen_patch_rev {
	struct {
		__u32 rev	 : 8,
		      stepping	 : 4,
		      model	 : 4,
		      __reserved : 4,
		      ext_model	 : 4,
		      ext_fam	 : 8;
	};
	__u32 ucode_rev;
};

/* Bitfield view of CPUID(1).EAX (family/model/stepping). */
union cpuid_1_eax {
	struct {
		__u32 stepping    : 4,
		      model	  : 4,
		      family	  : 4,
		      __reserved0 : 4,
		      ext_model   : 4,
		      ext_fam     : 8,
		      __reserved1 : 4;
	};
	__u32 full;
};
116  
/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd/builtin before jettisoning its contents. @mc is the
 * microcode patch we found to match.
 */
struct cont_desc {
	struct microcode_amd *mc;	/* matching patch within @data */
	u32		     psize;	/* size of the matching patch */
	u8		     *data;	/* start of the matching container */
	size_t		     size;	/* bytes of the container scanned */
};

/*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/arch/x86/microcode.rst
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";

/*
 * This is CPUID(1).EAX on the BSP. It is used in two ways:
 *
 * 1. To ignore the equivalence table on Zen1 and newer.
 *
 * 2. To match which patches to load because the patch revision ID
 *    already contains the f/m/s for which the microcode is destined
 *    for.
 */
static u32 bsp_cpuid_1_eax __ro_after_init;
146  
ucode_rev_to_cpuid(unsigned int val)147  static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
148  {
149  	union zen_patch_rev p;
150  	union cpuid_1_eax c;
151  
152  	p.ucode_rev = val;
153  	c.full = 0;
154  
155  	c.stepping  = p.stepping;
156  	c.model     = p.model;
157  	c.ext_model = p.ext_model;
158  	c.family    = 0xf;
159  	c.ext_fam   = p.ext_fam;
160  
161  	return c;
162  }
163  
find_equiv_id(struct equiv_cpu_table * et,u32 sig)164  static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
165  {
166  	unsigned int i;
167  
168  	/* Zen and newer do not need an equivalence table. */
169  	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
170  		return 0;
171  
172  	if (!et || !et->num_entries)
173  		return 0;
174  
175  	for (i = 0; i < et->num_entries; i++) {
176  		struct equiv_cpu_entry *e = &et->entry[i];
177  
178  		if (sig == e->installed_cpu)
179  			return e->equiv_cpu;
180  	}
181  	return 0;
182  }
183  
184  /*
185   * Check whether there is a valid microcode container file at the beginning
186   * of @buf of size @buf_size.
187   */
verify_container(const u8 * buf,size_t buf_size)188  static bool verify_container(const u8 *buf, size_t buf_size)
189  {
190  	u32 cont_magic;
191  
192  	if (buf_size <= CONTAINER_HDR_SZ) {
193  		pr_debug("Truncated microcode container header.\n");
194  		return false;
195  	}
196  
197  	cont_magic = *(const u32 *)buf;
198  	if (cont_magic != UCODE_MAGIC) {
199  		pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
200  		return false;
201  	}
202  
203  	return true;
204  }
205  
206  /*
207   * Check whether there is a valid, non-truncated CPU equivalence table at the
208   * beginning of @buf of size @buf_size.
209   */
verify_equivalence_table(const u8 * buf,size_t buf_size)210  static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
211  {
212  	const u32 *hdr = (const u32 *)buf;
213  	u32 cont_type, equiv_tbl_len;
214  
215  	if (!verify_container(buf, buf_size))
216  		return false;
217  
218  	/* Zen and newer do not need an equivalence table. */
219  	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
220  		return true;
221  
222  	cont_type = hdr[1];
223  	if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
224  		pr_debug("Wrong microcode container equivalence table type: %u.\n",
225  			 cont_type);
226  		return false;
227  	}
228  
229  	buf_size -= CONTAINER_HDR_SZ;
230  
231  	equiv_tbl_len = hdr[2];
232  	if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
233  	    buf_size < equiv_tbl_len) {
234  		pr_debug("Truncated equivalence table.\n");
235  		return false;
236  	}
237  
238  	return true;
239  }
240  
241  /*
242   * Check whether there is a valid, non-truncated microcode patch section at the
243   * beginning of @buf of size @buf_size.
244   *
245   * On success, @sh_psize returns the patch size according to the section header,
246   * to the caller.
247   */
248  static bool
__verify_patch_section(const u8 * buf,size_t buf_size,u32 * sh_psize)249  __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
250  {
251  	u32 p_type, p_size;
252  	const u32 *hdr;
253  
254  	if (buf_size < SECTION_HDR_SIZE) {
255  		pr_debug("Truncated patch section.\n");
256  		return false;
257  	}
258  
259  	hdr = (const u32 *)buf;
260  	p_type = hdr[0];
261  	p_size = hdr[1];
262  
263  	if (p_type != UCODE_UCODE_TYPE) {
264  		pr_debug("Invalid type field (0x%x) in container file section header.\n",
265  			 p_type);
266  		return false;
267  	}
268  
269  	if (p_size < sizeof(struct microcode_header_amd)) {
270  		pr_debug("Patch of size %u too short.\n", p_size);
271  		return false;
272  	}
273  
274  	*sh_psize = p_size;
275  
276  	return true;
277  }
278  
/*
 * Check whether the passed remaining file @buf_size is large enough to contain
 * a patch of the indicated @sh_psize (and also whether this size does not
 * exceed the per-family maximum). @sh_psize is the size read from the section
 * header.
 *
 * Returns the usable patch size in bytes, or 0 when the size is invalid for
 * this family.
 */
static unsigned int __verify_patch_size(u32 sh_psize, size_t buf_size)
{
	u8 family = x86_family(bsp_cpuid_1_eax);
	u32 max_size;

	/* F15h and newer have no fixed per-family maximum patch size. */
	if (family >= 0x15)
		return min_t(u32, sh_psize, buf_size);

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824

	switch (family) {
	case 0x10 ... 0x12:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	default:
		WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
		return 0;
	}

	/* Reject sizes larger than the family max or the remaining buffer. */
	if (sh_psize > min_t(u32, buf_size, max_size))
		return 0;

	return sh_psize;
}
313  
/*
 * Verify the patch in @buf.
 *
 * Returns:
 * negative: on error
 * positive: patch is not for this family, skip it
 * 0: success
 */
static int verify_patch(const u8 *buf, size_t buf_size, u32 *patch_size)
{
	u8 family = x86_family(bsp_cpuid_1_eax);
	struct microcode_header_amd *mc_hdr;
	unsigned int ret;
	u32 sh_psize;
	u16 proc_id;
	u8 patch_fam;

	if (!__verify_patch_section(buf, buf_size, &sh_psize))
		return -1;

	/*
	 * The section header length is not included in this indicated size
	 * but is present in the leftover file length so we need to subtract
	 * it before passing this value to the function below.
	 */
	buf_size -= SECTION_HDR_SIZE;

	/*
	 * Check if the remaining buffer is big enough to contain a patch of
	 * size sh_psize, as the section claims.
	 */
	if (buf_size < sh_psize) {
		pr_debug("Patch of size %u truncated.\n", sh_psize);
		return -1;
	}

	/* Additionally enforce the per-family maximum patch size. */
	ret = __verify_patch_size(sh_psize, buf_size);
	if (!ret) {
		pr_debug("Per-family patch size mismatch.\n");
		return -1;
	}

	*patch_size = sh_psize;

	mc_hdr	= (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
		return -1;
	}

	/*
	 * The high nibble of the processor revision ID encodes the patch's
	 * target family as an offset from 0xf; skip other families' patches.
	 */
	proc_id	= mc_hdr->processor_rev_id;
	patch_fam = 0xf + (proc_id >> 12);
	if (patch_fam != family)
		return 1;

	return 0;
}
371  
mc_patch_matches(struct microcode_amd * mc,u16 eq_id)372  static bool mc_patch_matches(struct microcode_amd *mc, u16 eq_id)
373  {
374  	/* Zen and newer do not need an equivalence table. */
375  	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
376  		return ucode_rev_to_cpuid(mc->hdr.patch_id).full == bsp_cpuid_1_eax;
377  	else
378  		return eq_id == mc->hdr.processor_rev_id;
379  }
380  
/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together.
 * Returns the amount of bytes consumed while scanning. @desc contains all the
 * data we're going to use in later stages of the application.
 */
static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
{
	struct equiv_cpu_table table;
	size_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	if (!verify_equivalence_table(ucode, size))
		return 0;

	buf = ucode;

	/* hdr[2] holds the equivalence table length in bytes. */
	table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
	table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);

	/*
	 * Find the equivalence ID of our CPU in this table. Even if this table
	 * doesn't contain a patch for the CPU, scan through the whole container
	 * so that it can be skipped in case there are other containers appended.
	 */
	eq_id = find_equiv_id(&table, bsp_cpuid_1_eax);

	/* Advance past the container header and the equivalence table. */
	buf  += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;

	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;
		int ret;

		ret = verify_patch(buf, size, &patch_size);
		if (ret < 0) {
			/*
			 * Patch verification failed, skip to the next container, if
			 * there is one. Before exit, check whether that container has
			 * found a patch already. If so, use it.
			 */
			goto out;
		} else if (ret > 0) {
			/* Patch is for another family: step over it. */
			goto skip;
		}

		mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
		if (mc_patch_matches(mc, eq_id)) {
			desc->psize = patch_size;
			desc->mc = mc;
		}

skip:
		/* Skip patch section header too: */
		buf  += patch_size + SECTION_HDR_SIZE;
		size -= patch_size + SECTION_HDR_SIZE;
	}

out:
	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU so return 0 to mean, @ucode
	 * already points to the proper container. Otherwise, we return the size
	 * we scanned so that we can advance to the next container in the
	 * buffer.
	 */
	if (desc->mc) {
		desc->data = ucode;
		desc->size = orig_size - size;

		return 0;
	}

	return orig_size - size;
}
464  
465  /*
466   * Scan the ucode blob for the proper container as we can have multiple
467   * containers glued together.
468   */
scan_containers(u8 * ucode,size_t size,struct cont_desc * desc)469  static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
470  {
471  	while (size) {
472  		size_t s = parse_container(ucode, size, desc);
473  		if (!s)
474  			return;
475  
476  		/* catch wraparound */
477  		if (size >= s) {
478  			ucode += s;
479  			size  -= s;
480  		} else {
481  			return;
482  		}
483  	}
484  }
485  
/*
 * Apply patch @mc on the current CPU by handing its address to the patch
 * loader MSR.
 *
 * Returns 0 when the CPU reports the patch's revision afterwards, -1 on
 * failure.
 */
static int __apply_microcode_amd(struct microcode_amd *mc)
{
	u32 rev, dummy;

	/* Point the CPU's patch loader at the patch image. */
	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	if (rev != mc->hdr.patch_id)
		return -1;

	return 0;
}
500  
/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 *
 * Returns true if a matching patch was found and applied successfully,
 * false otherwise.
 */
static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size)
{
	struct cont_desc desc = { 0 };
	struct microcode_amd *mc;
	bool ret = false;

	scan_containers(ucode, size, &desc);

	mc = desc.mc;
	if (!mc)
		return ret;

	/*
	 * Allow application of the same revision to pick up SMT-specific
	 * changes even if the revision of the other SMT thread is already
	 * up-to-date.
	 */
	if (old_rev > mc->hdr.patch_id)
		return ret;

	return !__apply_microcode_amd(mc);
}
534  
get_builtin_microcode(struct cpio_data * cp)535  static bool get_builtin_microcode(struct cpio_data *cp)
536  {
537  	char fw_name[36] = "amd-ucode/microcode_amd.bin";
538  	u8 family = x86_family(bsp_cpuid_1_eax);
539  	struct firmware fw;
540  
541  	if (IS_ENABLED(CONFIG_X86_32))
542  		return false;
543  
544  	if (family >= 0x15)
545  		snprintf(fw_name, sizeof(fw_name),
546  			 "amd-ucode/microcode_amd_fam%02hhxh.bin", family);
547  
548  	if (firmware_request_builtin(&fw, fw_name)) {
549  		cp->size = fw.size;
550  		cp->data = (void *)fw.data;
551  		return true;
552  	}
553  
554  	return false;
555  }
556  
find_blobs_in_containers(struct cpio_data * ret)557  static void __init find_blobs_in_containers(struct cpio_data *ret)
558  {
559  	struct cpio_data cp;
560  
561  	if (!get_builtin_microcode(&cp))
562  		cp = find_microcode_in_initrd(ucode_path);
563  
564  	*ret = cp;
565  }
566  
/*
 * Early microcode load on the boot CPU. Records the boot revision in
 * @ed->old_rev and, when a patch is applied, the new one in @ed->new_rev.
 */
void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
	struct cpio_data cp = { };
	u32 dummy;

	/* Stash CPUID(1).EAX for later family checks and patch matching. */
	bsp_cpuid_1_eax = cpuid_1_eax;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);

	/* Needed in load_microcode_amd() */
	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;

	find_blobs_in_containers(&cp);
	if (!(cp.data && cp.size))
		return;

	if (early_apply_microcode(ed->old_rev, cp.data, cp.size))
		native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
}
586  
static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size);

/*
 * Copy the matching container's patches into the kernel-heap cache before
 * the initrd contents are jettisoned.
 */
static int __init save_microcode_in_initrd(void)
{
	unsigned int cpuid_1_eax = native_cpuid_eax(1);
	struct cpuinfo_x86 *c = &boot_cpu_data;
	struct cont_desc desc = { 0 };
	enum ucode_state ret;
	struct cpio_data cp;

	/* Nothing to save when the loader is disabled or unsupported. */
	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
		return 0;

	find_blobs_in_containers(&cp);
	if (!(cp.data && cp.size))
		return -EINVAL;

	scan_containers(cp.data, cp.size, &desc);
	if (!desc.mc)
		return -EINVAL;

	/* UCODE_NFOUND/UCODE_ERROR (> UCODE_UPDATED) mean the save failed. */
	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
	if (ret > UCODE_UPDATED)
		return -EINVAL;

	return 0;
}
early_initcall(save_microcode_in_initrd);
615  
/*
 * Are patches @p and @n destined for the same CPU? On Zen+ the stepping can
 * optionally be ignored via @ignore_stepping.
 */
static inline bool patch_cpus_equivalent(struct ucode_patch *p,
					 struct ucode_patch *n,
					 bool ignore_stepping)
{
	union cpuid_1_eax p_cid, n_cid;

	/* Pre-Zen families compare equivalence IDs. */
	if (x86_family(bsp_cpuid_1_eax) < 0x17)
		return p->equiv_cpu == n->equiv_cpu;

	/* Zen and newer hardcode the f/m/s in the patch ID */
	p_cid = ucode_rev_to_cpuid(p->patch_id);
	n_cid = ucode_rev_to_cpuid(n->patch_id);

	if (ignore_stepping) {
		p_cid.stepping = 0;
		n_cid.stepping = 0;
	}

	return p_cid.full == n_cid.full;
}
635  
636  /*
637   * a small, trivial cache of per-family ucode patches
638   */
cache_find_patch(struct ucode_cpu_info * uci,u16 equiv_cpu)639  static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equiv_cpu)
640  {
641  	struct ucode_patch *p;
642  	struct ucode_patch n;
643  
644  	n.equiv_cpu = equiv_cpu;
645  	n.patch_id  = uci->cpu_sig.rev;
646  
647  	WARN_ON_ONCE(!n.patch_id);
648  
649  	list_for_each_entry(p, &microcode_cache, plist)
650  		if (patch_cpus_equivalent(p, &n, false))
651  			return p;
652  
653  	return NULL;
654  }
655  
patch_newer(struct ucode_patch * p,struct ucode_patch * n)656  static inline int patch_newer(struct ucode_patch *p, struct ucode_patch *n)
657  {
658  	/* Zen and newer hardcode the f/m/s in the patch ID */
659          if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
660  		union zen_patch_rev zp, zn;
661  
662  		zp.ucode_rev = p->patch_id;
663  		zn.ucode_rev = n->patch_id;
664  
665  		if (zn.stepping != zp.stepping)
666  			return -1;
667  
668  		return zn.rev > zp.rev;
669  	} else {
670  		return n->patch_id > p->patch_id;
671  	}
672  }
673  
/*
 * Insert @new_patch into the cache, replacing an equivalent older patch if
 * one exists. Takes ownership of @new_patch: it is freed when the cache
 * already holds a patch that is at least as new.
 */
static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;
	int ret;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (patch_cpus_equivalent(p, new_patch, true)) {
			ret = patch_newer(p, new_patch);
			/* Different stepping (Zen+): keep scanning. */
			if (ret < 0)
				continue;
			else if (!ret) {
				/* we already have the latest patch */
				kfree(new_patch->data);
				kfree(new_patch);
				return;
			}

			/* @new_patch is newer: swap it in, free the old one. */
			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &microcode_cache);
}
700  
/* Free every cached patch and empty the cache list. */
static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}
711  
/*
 * Find a cached patch for @cpu. Pre-Zen CPUs are looked up via their
 * equivalence ID; Zen+ matches on the f/m/s encoded in the patch ID.
 */
static struct ucode_patch *find_patch(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u32 rev, dummy __always_unused;
	u16 equiv_id = 0;

	/* fetch rev if not populated yet: */
	if (!uci->cpu_sig.rev) {
		rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
		uci->cpu_sig.rev = rev;
	}

	if (x86_family(bsp_cpuid_1_eax) < 0x17) {
		equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
		if (!equiv_id)
			return NULL;
	}

	return cache_find_patch(uci, equiv_id);
}
732  
reload_ucode_amd(unsigned int cpu)733  void reload_ucode_amd(unsigned int cpu)
734  {
735  	u32 rev, dummy __always_unused;
736  	struct microcode_amd *mc;
737  	struct ucode_patch *p;
738  
739  	p = find_patch(cpu);
740  	if (!p)
741  		return;
742  
743  	mc = p->data;
744  
745  	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
746  
747  	if (rev < mc->hdr.patch_id) {
748  		if (!__apply_microcode_amd(mc))
749  			pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
750  	}
751  }
752  
/* Fill @csig with @cpu's signature and currently running revision. */
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	return 0;
}
772  
/*
 * Apply the cached patch to @cpu, which must be the calling CPU.
 *
 * Returns:
 *  UCODE_NFOUND  - no matching cached patch
 *  UCODE_OK      - running revision is already newer, nothing applied
 *  UCODE_ERROR   - applying the patch failed
 *  UCODE_UPDATED - patch applied successfully
 */
static enum ucode_state apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	enum ucode_state ret;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return UCODE_NFOUND;

	rev = uci->cpu_sig.rev;

	mc_amd  = p->data;
	uci->mc = p->data;

	/*
	 * Need to apply the patch? Only a strictly newer running revision is
	 * skipped — an equal one is still re-applied.
	 */
	if (rev > mc_amd->hdr.patch_id) {
		ret = UCODE_OK;
		goto out;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return UCODE_ERROR;
	}

	rev = mc_amd->hdr.patch_id;
	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode	 = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}
820  
load_ucode_amd_ap(unsigned int cpuid_1_eax)821  void load_ucode_amd_ap(unsigned int cpuid_1_eax)
822  {
823  	unsigned int cpu = smp_processor_id();
824  
825  	ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax;
826  	apply_microcode_amd(cpu);
827  }
828  
/*
 * Copy the equivalence table at the start of @buf into @equiv_table. Zen+
 * skips the copy since it does not use the table.
 *
 * Returns the number of container bytes consumed (table plus container
 * header), or 0 on error.
 */
static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
	u32 equiv_tbl_len;
	const u32 *hdr;

	if (!verify_equivalence_table(buf, buf_size))
		return 0;

	hdr = (const u32 *)buf;
	equiv_tbl_len = hdr[2];

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		goto out;

	equiv_table.entry = vmalloc(equiv_tbl_len);
	if (!equiv_table.entry) {
		pr_err("failed to allocate equivalent CPU table\n");
		return 0;
	}

	memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
	equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);

out:
	/* add header length */
	return equiv_tbl_len + CONTAINER_HDR_SZ;
}
857  
free_equiv_cpu_table(void)858  static void free_equiv_cpu_table(void)
859  {
860  	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
861  		return;
862  
863  	vfree(equiv_table.entry);
864  	memset(&equiv_table, 0, sizeof(equiv_table));
865  }
866  
/* Tear down everything: the equivalence table and the patch cache. */
static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}
872  
/*
 * Return a non-negative value even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
				unsigned int *patch_size)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	u16 proc_id;
	int ret;

	ret = verify_patch(fw, leftover, patch_size);
	if (ret)
		return ret;

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	/* Copy the patch body, which starts right after the section header. */
	patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}
	patch->size = *patch_size;

	mc_hdr      = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id     = mc_hdr->processor_rev_id;

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Adding patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return 0;
}
921  
/* Scan the blob in @data and add microcode patches to the cache. */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	u8 *fw = (u8 *)data;
	size_t offset;

	/* Install (or, on Zen+, just skip) the leading equivalence table. */
	offset = install_equiv_cpu_table(data, size);
	if (!offset)
		return UCODE_ERROR;

	fw   += offset;
	size -= offset;

	/* The table must be followed immediately by a patch section. */
	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return UCODE_ERROR;
	}

	/* Walk the remaining sections, caching every valid patch. */
	while (size > 0) {
		unsigned int crnt_size = 0;
		int ret;

		ret = verify_and_add_patch(family, fw, size, &crnt_size);
		if (ret < 0)
			return UCODE_ERROR;

		fw   +=  crnt_size + SECTION_HDR_SIZE;
		size -= (crnt_size + SECTION_HDR_SIZE);
	}

	return UCODE_OK;
}
956  
/* Replace the equivalence table and re-populate the patch cache from @data. */
static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);
	if (ret == UCODE_OK)
		return ret;

	/* Parsing failed: tear down whatever was partially installed. */
	cleanup();
	return ret;
}
970  
/*
 * Parse the blob in @data into the cache and report whether any node's CPUs
 * would get a newer patch than the revision they are running.
 *
 * Returns UCODE_NEW when at least one CPU runs an older revision than a
 * cached patch, otherwise the parse result.
 */
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	struct cpuinfo_x86 *c;
	unsigned int nid, cpu;
	struct ucode_patch *p;
	enum ucode_state ret;

	ret = _load_microcode_amd(family, data, size);
	if (ret != UCODE_OK)
		return ret;

	for_each_node(nid) {
		cpu = cpumask_first(cpumask_of_node(nid));

		/*
		 * Skip CPU-less (e.g. memory-only) nodes: cpumask_first()
		 * returns nr_cpu_ids for an empty mask and cpu_data(cpu)
		 * would then index out of bounds.
		 */
		if (cpu >= nr_cpu_ids)
			continue;

		c = &cpu_data(cpu);

		p = find_patch(cpu);
		if (!p)
			continue;

		if (c->microcode >= p->patch_id)
			continue;

		ret = UCODE_NEW;
	}

	return ret;
}
998  
999  /*
1000   * AMD microcode firmware naming convention, up to family 15h they are in
1001   * the legacy file:
1002   *
1003   *    amd-ucode/microcode_amd.bin
1004   *
1005   * This legacy file is always smaller than 2K in size.
1006   *
1007   * Beginning with family 15h, they are in family-specific firmware files:
1008   *
1009   *    amd-ucode/microcode_amd_fam15h.bin
1010   *    amd-ucode/microcode_amd_fam16h.bin
1011   *    ...
1012   *
1013   * These might be larger than 2K.
1014   */
request_microcode_amd(int cpu,struct device * device)1015  static enum ucode_state request_microcode_amd(int cpu, struct device *device)
1016  {
1017  	char fw_name[36] = "amd-ucode/microcode_amd.bin";
1018  	struct cpuinfo_x86 *c = &cpu_data(cpu);
1019  	enum ucode_state ret = UCODE_NFOUND;
1020  	const struct firmware *fw;
1021  
1022  	if (force_minrev)
1023  		return UCODE_NFOUND;
1024  
1025  	if (c->x86 >= 0x15)
1026  		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
1027  
1028  	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
1029  		pr_debug("failed to load file %s\n", fw_name);
1030  		goto out;
1031  	}
1032  
1033  	ret = UCODE_ERROR;
1034  	if (!verify_container(fw->data, fw->size))
1035  		goto fw_release;
1036  
1037  	ret = load_microcode_amd(c->x86, fw->data, fw->size);
1038  
1039   fw_release:
1040  	release_firmware(fw);
1041  
1042   out:
1043  	return ret;
1044  }
1045  
microcode_fini_cpu_amd(int cpu)1046  static void microcode_fini_cpu_amd(int cpu)
1047  {
1048  	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
1049  
1050  	uci->mc = NULL;
1051  }
1052  
/* Callbacks handed to the generic microcode loader core. */
static struct microcode_ops microcode_amd_ops = {
	.request_microcode_fw	= request_microcode_amd,
	.collect_cpu_info	= collect_cpu_info_amd,
	.apply_microcode	= apply_microcode_amd,
	.microcode_fini_cpu	= microcode_fini_cpu_amd,
	.nmi_safe		= true,
};
1060  
init_amd_microcode(void)1061  struct microcode_ops * __init init_amd_microcode(void)
1062  {
1063  	struct cpuinfo_x86 *c = &boot_cpu_data;
1064  
1065  	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
1066  		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
1067  		return NULL;
1068  	}
1069  	return &microcode_amd_ops;
1070  }
1071  
/* Driver teardown: free the equivalence table and the patch cache. */
void __exit exit_amd_microcode(void)
{
	cleanup();
}
1076