cpu.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569
  1. /*
  2. * (C) Copyright 2008-2011
  3. * Graeme Russ, <graeme.russ@gmail.com>
  4. *
  5. * (C) Copyright 2002
  6. * Daniel Engström, Omicron Ceti AB, <daniel@omicron.se>
  7. *
  8. * (C) Copyright 2002
  9. * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
  10. * Marius Groeger <mgroeger@sysgo.de>
  11. *
  12. * (C) Copyright 2002
  13. * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
  14. * Alex Zuepke <azu@sysgo.de>
  15. *
  16. * Part of this file is adapted from coreboot
  17. * src/arch/x86/lib/cpu.c
  18. *
  19. * SPDX-License-Identifier: GPL-2.0+
  20. */
  21. #include <common.h>
  22. #include <command.h>
  23. #include <errno.h>
  24. #include <malloc.h>
  25. #include <asm/control_regs.h>
  26. #include <asm/cpu.h>
  27. #include <asm/processor.h>
  28. #include <asm/processor-flags.h>
  29. #include <asm/interrupt.h>
  30. #include <linux/compiler.h>
/* Declare 'gd', the pointer to U-Boot's global data (read via gd->arch below) */
DECLARE_GLOBAL_DATA_PTR;
/*
 * Constructor for a conventional segment GDT (or LDT) entry.
 * Packs base, limit and flags into the hardware descriptor layout.
 * This is a macro so it can be used in initialisers.
 */
#define GDT_ENTRY(flags, base, limit) \
	((((base) & 0xff000000ULL) << (56-24)) | /* base bits 31:24 */ \
	(((flags) & 0x0000f0ffULL) << 40) | /* access byte + flags nibble */ \
	(((limit) & 0x000f0000ULL) << (48-16)) | /* limit bits 19:16 */ \
	(((base) & 0x00ffffffULL) << 16) | /* base bits 23:0 */ \
	(((limit) & 0x0000ffffULL))) /* limit bits 15:0 */
/* Operand for the lgdt instruction: 16-bit limit plus 32-bit linear base */
struct gdt_ptr {
	u16 len;	/* size of the GDT in bytes, minus one (see load_gdt()) */
	u32 ptr;	/* linear address of the first descriptor */
} __packed;
/* Result of CPU identification, filled in by identify_cpu() */
struct cpu_device_id {
	unsigned vendor;	/* normalised X86_VENDOR_* value */
	unsigned device;	/* raw EAX of CPUID leaf 1, or a synthetic 386/486 id */
};
/* Decoded family/model/stepping fields, filled in by get_fms() */
struct cpuinfo_x86 {
	uint8_t x86;		/* CPU family */
	uint8_t x86_vendor;	/* CPU vendor */
	uint8_t x86_model;	/* CPU model (including extended model) */
	uint8_t x86_mask;	/* CPU stepping */
};
/*
 * List of cpu vendor strings along with their normalized
 * id values. Each string is the exact 12-byte vendor id that
 * CPUID leaf 0 returns in EBX:EDX:ECX (see build_vendor_name()).
 */
static struct {
	int vendor;
	const char *name;
} x86_vendors[] = {
	{ X86_VENDOR_INTEL, "GenuineIntel", },
	{ X86_VENDOR_CYRIX, "CyrixInstead", },
	{ X86_VENDOR_AMD, "AuthenticAMD", },
	{ X86_VENDOR_UMC, "UMC UMC UMC ", },
	{ X86_VENDOR_NEXGEN, "NexGenDriven", },
	{ X86_VENDOR_CENTAUR, "CentaurHauls", },
	{ X86_VENDOR_RISE, "RiseRiseRise", },
	{ X86_VENDOR_TRANSMETA, "GenuineTMx86", },
	{ X86_VENDOR_TRANSMETA, "TransmetaCPU", },
	{ X86_VENDOR_NSC, "Geode by NSC", },
	{ X86_VENDOR_SIS, "SiS SiS SiS ", },
};
/* Human-readable vendor names, indexed by the X86_VENDOR_* values */
static const char *const x86_vendor_name[] = {
	[X86_VENDOR_INTEL] = "Intel",
	[X86_VENDOR_CYRIX] = "Cyrix",
	[X86_VENDOR_AMD] = "AMD",
	[X86_VENDOR_UMC] = "UMC",
	[X86_VENDOR_NEXGEN] = "NexGen",
	[X86_VENDOR_CENTAUR] = "Centaur",
	[X86_VENDOR_RISE] = "Rise",
	[X86_VENDOR_TRANSMETA] = "Transmeta",
	[X86_VENDOR_NSC] = "NSC",
	[X86_VENDOR_SIS] = "SiS",
};
/* Load DS with the selector for GDT entry @segment (selector = byte offset) */
static void load_ds(u32 segment)
{
	asm volatile("movl %0, %%ds" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}
/* Load ES with the selector for GDT entry @segment */
static void load_es(u32 segment)
{
	asm volatile("movl %0, %%es" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}
/* Load FS with the selector for GDT entry @segment */
static void load_fs(u32 segment)
{
	asm volatile("movl %0, %%fs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}
/* Load GS with the selector for GDT entry @segment */
static void load_gs(u32 segment)
{
	asm volatile("movl %0, %%gs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}
/* Load SS with the selector for GDT entry @segment */
static void load_ss(u32 segment)
{
	asm volatile("movl %0, %%ss" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}
/*
 * load_gdt() - install a new Global Descriptor Table
 *
 * @boot_gdt:	pointer to the first 8-byte descriptor
 * @num_entries: number of descriptors in the table
 *
 * The lgdt limit field holds the table size in bytes minus one.
 */
static void load_gdt(const u64 *boot_gdt, u16 num_entries)
{
	struct gdt_ptr gdt;

	gdt.len = (num_entries * 8) - 1;
	gdt.ptr = (u32)boot_gdt;

	asm volatile("lgdtl %0\n" : : "m" (gdt));
}
/*
 * setup_gdt() - build and load U-Boot's Global Descriptor Table
 *
 * Creates flat 4GB 32-bit code/data segments, an FS data segment whose
 * base is the address of the global data pointer (so an FS-relative read
 * at offset 0 yields gd), and 64kB 16-bit segments, then loads the table
 * and reloads every data segment register. FS is loaded last, after
 * gd_addr has been stored.
 *
 * @id:		global data pointer to expose through the FS segment
 * @gdt_addr:	memory to hold the GDT entries
 */
void setup_gdt(gd_t *id, u64 *gdt_addr)
{
	/* CS: code, read/execute, 4 GB, base 0 */
	gdt_addr[X86_GDT_ENTRY_32BIT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff);

	/* DS: data, read/write, 4 GB, base 0 */
	gdt_addr[X86_GDT_ENTRY_32BIT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff);

	/* FS: data, read/write, 4 GB, base (Global Data Pointer) */
	id->arch.gd_addr = id;
	gdt_addr[X86_GDT_ENTRY_32BIT_FS] = GDT_ENTRY(0xc093,
		(ulong)&id->arch.gd_addr, 0xfffff);

	/* 16-bit CS: code, read/execute, 64 kB, base 0 */
	gdt_addr[X86_GDT_ENTRY_16BIT_CS] = GDT_ENTRY(0x109b, 0, 0x0ffff);

	/* 16-bit DS: data, read/write, 64 kB, base 0 */
	gdt_addr[X86_GDT_ENTRY_16BIT_DS] = GDT_ENTRY(0x1093, 0, 0x0ffff);

	load_gdt(gdt_addr, X86_GDT_NUM_ENTRIES);
	load_ds(X86_GDT_ENTRY_32BIT_DS);
	load_es(X86_GDT_ENTRY_32BIT_DS);
	load_gs(X86_GDT_ENTRY_32BIT_DS);
	load_ss(X86_GDT_ENTRY_32BIT_DS);
	load_fs(X86_GDT_ENTRY_32BIT_FS);
}
/*
 * x86_cleanup_before_linux() - weak arch cleanup hook run before booting Linux
 *
 * When bootstage stashing is enabled, saves the bootstage timing records
 * at CONFIG_BOOTSTAGE_STASH so the OS can retrieve them.
 *
 * Return: always 0
 */
int __weak x86_cleanup_before_linux(void)
{
#ifdef CONFIG_BOOTSTAGE_STASH
	bootstage_stash((void *)CONFIG_BOOTSTAGE_STASH,
			CONFIG_BOOTSTAGE_STASH_SIZE);
#endif

	return 0;
}
/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */

/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 *
 * Return: non-zero if the flags (AH after lahf) are unchanged by the
 * divide, i.e. this looks like a Cyrix part.
 */
static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
	     "div %b2\n\t"	/* divide 5 by 2 */
	     "lahf"		/* store flags into %ah */
	     : "=a" (test)
	     : "0" (5), "q" (2)
	     : "cc");

	/* AH is 0x02 on Cyrix after the divide.. */
	return (unsigned char) (test >> 8) == 0x02;
}
/*
 * Detect a NexGen CPU running without BIOS hypercode new enough
 * to have CPUID. (Thanks to Herbert Oppmann)
 *
 * Performs a 16-bit divide and then branches on the resulting flags.
 * NOTE(review): the movl before "jnz" does not alter EFLAGS, so the
 * branch tests the flag state left by divw — confirm against the
 * original Linux implementation if this is ever touched.
 *
 * Return: 1 if the NexGen signature behaviour is observed, 0 otherwise
 */
static int deep_magic_nexgen_probe(void)
{
	int ret;

	__asm__ __volatile__ (
		"	movw	$0x5555, %%ax\n"
		"	xorw	%%dx,%%dx\n"
		"	movw	$2, %%cx\n"
		"	divw	%%cx\n"
		"	movl	$0, %%eax\n"
		"	jnz	1f\n"
		"	movl	$1, %%eax\n"
		"1:\n"
		: "=a" (ret) : : "cx", "dx");

	return ret;
}
  185. static bool has_cpuid(void)
  186. {
  187. return flag_is_changeable_p(X86_EFLAGS_ID);
  188. }
  189. static int build_vendor_name(char *vendor_name)
  190. {
  191. struct cpuid_result result;
  192. result = cpuid(0x00000000);
  193. unsigned int *name_as_ints = (unsigned int *)vendor_name;
  194. name_as_ints[0] = result.ebx;
  195. name_as_ints[1] = result.edx;
  196. name_as_ints[2] = result.ecx;
  197. return result.eax;
  198. }
  199. static void identify_cpu(struct cpu_device_id *cpu)
  200. {
  201. char vendor_name[16];
  202. int i;
  203. vendor_name[0] = '\0'; /* Unset */
  204. cpu->device = 0; /* fix gcc 4.4.4 warning */
  205. /* Find the id and vendor_name */
  206. if (!has_cpuid()) {
  207. /* Its a 486 if we can modify the AC flag */
  208. if (flag_is_changeable_p(X86_EFLAGS_AC))
  209. cpu->device = 0x00000400; /* 486 */
  210. else
  211. cpu->device = 0x00000300; /* 386 */
  212. if ((cpu->device == 0x00000400) && test_cyrix_52div()) {
  213. memcpy(vendor_name, "CyrixInstead", 13);
  214. /* If we ever care we can enable cpuid here */
  215. }
  216. /* Detect NexGen with old hypercode */
  217. else if (deep_magic_nexgen_probe())
  218. memcpy(vendor_name, "NexGenDriven", 13);
  219. }
  220. if (has_cpuid()) {
  221. int cpuid_level;
  222. cpuid_level = build_vendor_name(vendor_name);
  223. vendor_name[12] = '\0';
  224. /* Intel-defined flags: level 0x00000001 */
  225. if (cpuid_level >= 0x00000001) {
  226. cpu->device = cpuid_eax(0x00000001);
  227. } else {
  228. /* Have CPUID level 0 only unheard of */
  229. cpu->device = 0x00000400;
  230. }
  231. }
  232. cpu->vendor = X86_VENDOR_UNKNOWN;
  233. for (i = 0; i < ARRAY_SIZE(x86_vendors); i++) {
  234. if (memcmp(vendor_name, x86_vendors[i].name, 12) == 0) {
  235. cpu->vendor = x86_vendors[i].vendor;
  236. break;
  237. }
  238. }
  239. }
  240. static inline void get_fms(struct cpuinfo_x86 *c, uint32_t tfms)
  241. {
  242. c->x86 = (tfms >> 8) & 0xf;
  243. c->x86_model = (tfms >> 4) & 0xf;
  244. c->x86_mask = tfms & 0xf;
  245. if (c->x86 == 0xf)
  246. c->x86 += (tfms >> 20) & 0xff;
  247. if (c->x86 >= 0x6)
  248. c->x86_model += ((tfms >> 16) & 0xF) << 4;
  249. }
/*
 * x86_cpu_init_f() - core x86 CPU init, run before relocation
 *
 * Initialises the FPU (fninit; clears CR0.EM, sets CR0.MP and CR0.NE)
 * and, when CPUID is available, decodes vendor/family/model/stepping
 * and the raw device id into gd->arch.
 *
 * NOTE(review): the "i" asm constraints rely on the compiler folding
 * these const locals to immediates, which requires optimisation.
 *
 * Return: always 0
 */
int x86_cpu_init_f(void)
{
	const u32 em_rst = ~X86_CR0_EM;
	const u32 mp_ne_set = X86_CR0_MP | X86_CR0_NE;

	/* initialize FPU, reset EM, set MP and NE */
	asm ("fninit\n" \
	     "movl %%cr0, %%eax\n" \
	     "andl %0, %%eax\n" \
	     "orl %1, %%eax\n" \
	     "movl %%eax, %%cr0\n" \
	     : : "i" (em_rst), "i" (mp_ne_set) : "eax");

	/* identify CPU via cpuid and store the decoded info into gd->arch */
	if (has_cpuid()) {
		struct cpu_device_id cpu;
		struct cpuinfo_x86 c;

		identify_cpu(&cpu);
		get_fms(&c, cpu.device);
		gd->arch.x86 = c.x86;
		gd->arch.x86_vendor = cpu.vendor;
		gd->arch.x86_model = c.x86_model;
		gd->arch.x86_mask = c.x86_mask;
		gd->arch.x86_device = cpu.device;
	}

	return 0;
}
/*
 * x86_cpu_init_r() - core x86 CPU init, run after relocation
 *
 * Sets up the CPU's interrupt and exception handling.
 *
 * Return: always 0
 */
int x86_cpu_init_r(void)
{
	/* Initialize core interrupt and exception functionality of CPU */
	cpu_init_interrupts();
	return 0;
}

/* Boards may override cpu_init_r(); by default it is x86_cpu_init_r() */
int cpu_init_r(void) __attribute__((weak, alias("x86_cpu_init_r")));
  282. void x86_enable_caches(void)
  283. {
  284. unsigned long cr0;
  285. cr0 = read_cr0();
  286. cr0 &= ~(X86_CR0_NW | X86_CR0_CD);
  287. write_cr0(cr0);
  288. wbinvd();
  289. }
  290. void enable_caches(void) __attribute__((weak, alias("x86_enable_caches")));
  291. void x86_disable_caches(void)
  292. {
  293. unsigned long cr0;
  294. cr0 = read_cr0();
  295. cr0 |= X86_CR0_NW | X86_CR0_CD;
  296. wbinvd();
  297. write_cr0(cr0);
  298. wbinvd();
  299. }
  300. void disable_caches(void) __attribute__((weak, alias("x86_disable_caches")));
/*
 * x86_init_cache() - default cache init: simply enable the caches
 *
 * Return: always 0
 */
int x86_init_cache(void)
{
	enable_caches();

	return 0;
}
int init_cache(void) __attribute__((weak, alias("x86_init_cache")));
/*
 * do_reset() - handler for the 'reset' command
 *
 * Prints a notice, waits 50ms (so the message can drain out of the
 * console), disables interrupts and resets the CPU. reset_cpu() is not
 * expected to return; the trailing return is for the compiler only.
 */
int do_reset(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
	printf("resetting ...\n");

	/* wait 50 ms */
	udelay(50000);
	disable_interrupts();
	reset_cpu(0);

	/*NOTREACHED*/
	return 0;
}
/* Flush and invalidate all CPU caches; the range arguments are ignored */
void flush_cache(unsigned long dummy1, unsigned long dummy2)
{
	asm("wbinvd\n");
}
/*
 * generate_gpf() - deliberately raise a general protection fault
 *
 * Implemented in the file-level asm below: a far jump to a selector that
 * has no GDT entry, which the CPU rejects with #GP. Used by reset_cpu()
 * to provoke a triple fault.
 */
void __attribute__ ((regparm(0))) generate_gpf(void);

/* segment 0x70 is an arbitrary segment which does not exist */
asm(".globl generate_gpf\n"
	".hidden generate_gpf\n"
	".type generate_gpf, @function\n"
	"generate_gpf:\n"
	"ljmp $0x70, $0x47114711\n");
/*
 * reset_cpu() - default (weak) CPU reset via triple fault
 *
 * Points both the #GP and double-fault vectors at generate_gpf(), then
 * triggers a #GP. The fault handler itself faults, escalating to a
 * double fault and then a triple fault, which resets the processor.
 *
 * @addr: ignored
 */
__weak void reset_cpu(ulong addr)
{
	printf("Resetting using x86 Triple Fault\n");
	set_vector(13, generate_gpf);	/* general protection fault handler */
	set_vector(8, generate_gpf);	/* double fault handler */
	generate_gpf();			/* start the show */
}
  335. int dcache_status(void)
  336. {
  337. return !(read_cr0() & 0x40000000);
  338. }
/* Define these functions to allow ehci-hcd to function */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	/* Intentionally empty stub so common code links on x86 */
}
/* Intentionally empty stub so common code links on x86 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}
/* Enable the data cache (x86 has a single control for all caches) */
void dcache_enable(void)
{
	enable_caches();
}
/* Disable the data cache (x86 has a single control for all caches) */
void dcache_disable(void)
{
	disable_caches();
}
/* No separate instruction-cache control on x86; nothing to do */
void icache_enable(void)
{
}
/* No separate instruction-cache control on x86; nothing to do */
void icache_disable(void)
{
}
/* The instruction cache is always reported as enabled */
int icache_status(void)
{
	return 1;
}
/*
 * cpu_enable_paging_pae() - turn on PAE paging
 *
 * Loads @cr3 as the page-table base, sets CR4 bit 5 (0x20, the PAE
 * enable) and then CR0 bit 31 (0x80000000, the paging enable).
 *
 * @cr3: physical address of the top-level page table
 */
void cpu_enable_paging_pae(ulong cr3)
{
	__asm__ __volatile__(
		/* Load the page table address */
		"movl	%0, %%cr3\n"
		/* Enable pae */
		"movl	%%cr4, %%eax\n"
		"orl	$0x00000020, %%eax\n"
		"movl	%%eax, %%cr4\n"
		/* Enable paging */
		"movl	%%cr0, %%eax\n"
		"orl	$0x80000000, %%eax\n"
		"movl	%%eax, %%cr0\n"
		:
		: "r" (cr3)
		: "eax");
}
/*
 * cpu_disable_paging_pae() - turn off PAE paging
 *
 * Clears CR0 bit 31 (paging enable) first, then CR4 bit 5 (PAE) —
 * the reverse of the order used in cpu_enable_paging_pae().
 */
void cpu_disable_paging_pae(void)
{
	/* Turn off paging */
	__asm__ __volatile__ (
		/* Disable paging */
		"movl	%%cr0, %%eax\n"
		"andl	$0x7fffffff, %%eax\n"
		"movl	%%eax, %%cr0\n"
		/* Disable pae */
		"movl	%%cr4, %%eax\n"
		"andl	$0xffffffdf, %%eax\n"
		"movl	%%eax, %%cr4\n"
		:
		:
		: "eax");
}
  397. static bool can_detect_long_mode(void)
  398. {
  399. return cpuid_eax(0x80000000) > 0x80000000UL;
  400. }
  401. static bool has_long_mode(void)
  402. {
  403. return cpuid_edx(0x80000001) & (1 << 29) ? true : false;
  404. }
  405. int cpu_has_64bit(void)
  406. {
  407. return has_cpuid() && can_detect_long_mode() &&
  408. has_long_mode();
  409. }
  410. const char *cpu_vendor_name(int vendor)
  411. {
  412. const char *name;
  413. name = "<invalid cpu vendor>";
  414. if ((vendor < (ARRAY_SIZE(x86_vendor_name))) &&
  415. (x86_vendor_name[vendor] != 0))
  416. name = x86_vendor_name[vendor];
  417. return name;
  418. }
  419. char *cpu_get_name(char *name)
  420. {
  421. unsigned int *name_as_ints = (unsigned int *)name;
  422. struct cpuid_result regs;
  423. char *ptr;
  424. int i;
  425. /* This bit adds up to 48 bytes */
  426. for (i = 0; i < 3; i++) {
  427. regs = cpuid(0x80000002 + i);
  428. name_as_ints[i * 4 + 0] = regs.eax;
  429. name_as_ints[i * 4 + 1] = regs.ebx;
  430. name_as_ints[i * 4 + 2] = regs.ecx;
  431. name_as_ints[i * 4 + 3] = regs.edx;
  432. }
  433. name[CPU_MAX_NAME_LEN - 1] = '\0';
  434. /* Skip leading spaces. */
  435. ptr = name;
  436. while (*ptr == ' ')
  437. ptr++;
  438. return ptr;
  439. }
/*
 * default_print_cpuinfo() - print CPU architecture, vendor and device id
 *
 * Uses the values stored in gd->arch by x86_cpu_init_f().
 *
 * Return: always 0
 */
int default_print_cpuinfo(void)
{
	printf("CPU: %s, vendor %s, device %xh\n",
	       cpu_has_64bit() ? "x86_64" : "x86",
	       cpu_vendor_name(gd->arch.x86_vendor), gd->arch.x86_device);

	return 0;
}
/* 6 pages: 1 for level 4, 1 for level 3, 4 for level 2 (24KiB total) */
#define PAGETABLE_SIZE		(6 * 4096)

/**
 * build_pagetable() - build a flat 4GiB page table structure for 64-bit mode
 *
 * Entries are 8-byte PAE-format descriptors written as pairs of u32s
 * (only the low word is set; the high word stays zero from the memset).
 * The '+ 7' sets the low attribute bits of the table pointers; 0x183
 * marks the 2MiB level-2 mappings (present/writable large pages —
 * see the Intel SDM for the exact bit meanings).
 *
 * @pgtable: Pointer to a 24KiB block of memory
 */
static void build_pagetable(uint32_t *pgtable)
{
	uint i;

	memset(pgtable, '\0', PAGETABLE_SIZE);

	/* Level 4 needs a single entry */
	pgtable[0] = (uint32_t)&pgtable[1024] + 7;

	/* Level 3 has one 64-bit entry for each GiB of memory */
	for (i = 0; i < 4; i++) {
		pgtable[1024 + i * 2] = (uint32_t)&pgtable[2048] +
							0x1000 * i + 7;
	}

	/* Level 2 has 2048 64-bit entries, each representing 2MiB */
	for (i = 0; i < 2048; i++)
		pgtable[2048 + i * 2] = 0x183 + (i << 21UL);
}
/*
 * cpu_jump_to_64bit() - enter 64-bit mode and jump to a target address
 *
 * Builds an identity-mapped 4GiB page table and hands control to
 * cpu_call64(). On success cpu_call64() presumably does not return
 * (TODO confirm against its definition); the free/-EFAULT path only
 * runs if it comes back.
 *
 * @setup_base: setup block address passed to the 64-bit code
 * @target: address to jump to in 64-bit mode
 * Return: -ENOMEM if the page table cannot be allocated, -EFAULT if
 * cpu_call64() unexpectedly returns
 */
int cpu_jump_to_64bit(ulong setup_base, ulong target)
{
	uint32_t *pgtable;

	pgtable = memalign(4096, PAGETABLE_SIZE);
	if (!pgtable)
		return -ENOMEM;

	build_pagetable(pgtable);
	cpu_call64((ulong)pgtable, setup_base, target);
	free(pgtable);

	return -EFAULT;
}