cpu.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595
  1. /*
  2. * (C) Copyright 2008-2011
  3. * Graeme Russ, <graeme.russ@gmail.com>
  4. *
  5. * (C) Copyright 2002
  6. * Daniel Engström, Omicron Ceti AB, <daniel@omicron.se>
  7. *
  8. * (C) Copyright 2002
  9. * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
  10. * Marius Groeger <mgroeger@sysgo.de>
  11. *
  12. * (C) Copyright 2002
  13. * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
  14. * Alex Zuepke <azu@sysgo.de>
  15. *
  16. * Part of this file is adapted from coreboot
  17. * src/arch/x86/lib/cpu.c
  18. *
  19. * SPDX-License-Identifier: GPL-2.0+
  20. */
  21. #include <common.h>
  22. #include <command.h>
  23. #include <errno.h>
  24. #include <malloc.h>
  25. #include <asm/control_regs.h>
  26. #include <asm/cpu.h>
  27. #include <asm/post.h>
  28. #include <asm/processor.h>
  29. #include <asm/processor-flags.h>
  30. #include <asm/interrupt.h>
  31. #include <linux/compiler.h>
  32. DECLARE_GLOBAL_DATA_PTR;
/*
 * Constructor for a conventional segment GDT (or LDT) entry
 * This is a macro so it can be used in initialisers
 *
 * The masks and shifts scatter the base, limit and access/flags fields
 * into the interleaved layout of an 8-byte x86 segment descriptor.
 */
#define GDT_ENTRY(flags, base, limit)			\
	((((base) & 0xff000000ULL) << (56-24)) |	\
	 (((flags) & 0x0000f0ffULL) << 40) |		\
	 (((limit) & 0x000f0000ULL) << (48-16)) |	\
	 (((base) & 0x00ffffffULL) << 16) |		\
	 (((limit) & 0x0000ffffULL)))

/*
 * Operand for the lgdt instruction: a 16-bit limit followed by a 32-bit
 * base address. Must be packed so no padding is inserted between the
 * two fields.
 */
struct gdt_ptr {
	u16 len;	/* table size in bytes, minus one (see load_gdt()) */
	u32 ptr;	/* linear base address of the descriptor table */
} __packed;
/* Identification of a CPU: normalized vendor id plus raw device id */
struct cpu_device_id {
	unsigned vendor;	/* X86_VENDOR_* id (see x86_vendors[]) */
	unsigned device;	/* raw CPUID leaf 1 EAX, or synthesized value */
};

/* Family/model/stepping decoded from the raw device id by get_fms() */
struct cpuinfo_x86 {
	uint8_t x86;		/* CPU family */
	uint8_t x86_vendor;	/* CPU vendor */
	uint8_t x86_model;	/* CPU model (with extended model folded in) */
	uint8_t x86_mask;	/* stepping */
};
/*
 * List of cpu vendor strings along with their normalized
 * id values. The strings are the exact 12-character vendor ids
 * returned by CPUID leaf 0; identify_cpu() matches against them.
 */
static struct {
	int vendor;
	const char *name;
} x86_vendors[] = {
	{ X86_VENDOR_INTEL,     "GenuineIntel", },
	{ X86_VENDOR_CYRIX,     "CyrixInstead", },
	{ X86_VENDOR_AMD,       "AuthenticAMD", },
	{ X86_VENDOR_UMC,       "UMC UMC UMC ", },
	{ X86_VENDOR_NEXGEN,    "NexGenDriven", },
	{ X86_VENDOR_CENTAUR,   "CentaurHauls", },
	{ X86_VENDOR_RISE,      "RiseRiseRise", },
	{ X86_VENDOR_TRANSMETA, "GenuineTMx86", },
	{ X86_VENDOR_TRANSMETA, "TransmetaCPU", },
	{ X86_VENDOR_NSC,       "Geode by NSC", },
	{ X86_VENDOR_SIS,       "SiS SiS SiS ", },
};

/*
 * Printable vendor names indexed by X86_VENDOR_* id; ids without an
 * entry are left NULL (see cpu_vendor_name()).
 */
static const char *const x86_vendor_name[] = {
	[X86_VENDOR_INTEL]     = "Intel",
	[X86_VENDOR_CYRIX]     = "Cyrix",
	[X86_VENDOR_AMD]       = "AMD",
	[X86_VENDOR_UMC]       = "UMC",
	[X86_VENDOR_NEXGEN]    = "NexGen",
	[X86_VENDOR_CENTAUR]   = "Centaur",
	[X86_VENDOR_RISE]      = "Rise",
	[X86_VENDOR_TRANSMETA] = "Transmeta",
	[X86_VENDOR_NSC]       = "NSC",
	[X86_VENDOR_SIS]       = "SiS",
};
/*
 * Segment register loaders. Each takes a GDT entry index and loads the
 * register with the corresponding selector: the index is scaled by the
 * descriptor size, leaving the low selector bits (RPL/TI) zero, i.e.
 * ring 0 and table = GDT.
 */
static void load_ds(u32 segment)
{
	asm volatile("movl %0, %%ds" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_es(u32 segment)
{
	asm volatile("movl %0, %%es" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_fs(u32 segment)
{
	asm volatile("movl %0, %%fs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_gs(u32 segment)
{
	asm volatile("movl %0, %%gs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_ss(u32 segment)
{
	asm volatile("movl %0, %%ss" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}
/*
 * load_gdt() - install a new Global Descriptor Table
 *
 * @boot_gdt:    base address of the descriptor array
 * @num_entries: number of descriptors in the table
 */
static void load_gdt(const u64 *boot_gdt, u16 num_entries)
{
	struct gdt_ptr gdt;

	/* lgdt takes the table limit (size in bytes minus one) and base */
	gdt.len = (num_entries * X86_GDT_ENTRY_SIZE) - 1;
	gdt.ptr = (u32)boot_gdt;

	asm volatile("lgdtl %0\n" : : "m" (gdt));
}
/*
 * setup_gdt() - build and install U-Boot's GDT, reload segment registers
 *
 * @id:       global data pointer; its own address is stored in
 *            id->arch.gd_addr and used as the FS descriptor base, so
 *            gd can later be recovered through %fs
 * @gdt_addr: memory to hold the descriptor table
 *
 * The new GDT must be loaded (load_gdt) before any segment register is
 * reloaded with a selector that references it.
 */
void setup_gdt(gd_t *id, u64 *gdt_addr)
{
	/* CS: code, read/execute, 4 GB, base 0 */
	gdt_addr[X86_GDT_ENTRY_32BIT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff);

	/* DS: data, read/write, 4 GB, base 0 */
	gdt_addr[X86_GDT_ENTRY_32BIT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff);

	/* FS: data, read/write, 4 GB, base (Global Data Pointer) */
	id->arch.gd_addr = id;
	gdt_addr[X86_GDT_ENTRY_32BIT_FS] = GDT_ENTRY(0xc093,
		(ulong)&id->arch.gd_addr, 0xfffff);

	/* 16-bit CS: code, read/execute, 64 kB, base 0 */
	gdt_addr[X86_GDT_ENTRY_16BIT_CS] = GDT_ENTRY(0x009b, 0, 0x0ffff);

	/* 16-bit DS: data, read/write, 64 kB, base 0 */
	gdt_addr[X86_GDT_ENTRY_16BIT_DS] = GDT_ENTRY(0x0093, 0, 0x0ffff);

	/* 16-bit code/data entries with a 4 GB ("flat") limit */
	gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_CS] = GDT_ENTRY(0x809b, 0, 0xfffff);
	gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_DS] = GDT_ENTRY(0x8093, 0, 0xfffff);

	load_gdt(gdt_addr, X86_GDT_NUM_ENTRIES);
	load_ds(X86_GDT_ENTRY_32BIT_DS);
	load_es(X86_GDT_ENTRY_32BIT_DS);
	load_gs(X86_GDT_ENTRY_32BIT_DS);
	load_ss(X86_GDT_ENTRY_32BIT_DS);
	load_fs(X86_GDT_ENTRY_32BIT_FS);
}
/*
 * x86_cleanup_before_linux() - default (weak) cleanup before booting Linux
 *
 * Stashes the bootstage timing records at a fixed address for the
 * kernel to pick up, when CONFIG_BOOTSTAGE_STASH is enabled.
 *
 * @return 0 always
 */
int __weak x86_cleanup_before_linux(void)
{
#ifdef CONFIG_BOOTSTAGE_STASH
	bootstage_stash((void *)CONFIG_BOOTSTAGE_STASH_ADDR,
			CONFIG_BOOTSTAGE_STASH_SIZE);
#endif

	return 0;
}
/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */

/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 *
 * Returns non-zero if the flags survived the divide (i.e. Cyrix).
 */
static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
	     "div %b2\n\t"	/* divide 5 by 2 */
	     "lahf"		/* store flags into %ah */
	     : "=a" (test)
	     : "0" (5), "q" (2)
	     : "cc");

	/* AH is 0x02 on Cyrix after the divide.. */
	return (unsigned char) (test >> 8) == 0x02;
}
/*
 * Detect a NexGen CPU running without BIOS hypercode new enough
 * to have CPUID. (Thanks to Herbert Oppmann)
 *
 * NOTE(review): relies on historical, vendor-specific flag behaviour
 * after a 16-bit divide; left byte-for-byte as inherited.
 */
static int deep_magic_nexgen_probe(void)
{
	int ret;

	__asm__ __volatile__ (
		" movw $0x5555, %%ax\n"
		" xorw %%dx,%%dx\n"
		" movw $2, %%cx\n"
		" divw %%cx\n"
		" movl $0, %%eax\n"
		" jnz 1f\n"
		" movl $1, %%eax\n"
		"1:\n"
		: "=a" (ret) : : "cx", "dx");
	return ret;
}
  188. static bool has_cpuid(void)
  189. {
  190. return flag_is_changeable_p(X86_EFLAGS_ID);
  191. }
  192. static bool has_mtrr(void)
  193. {
  194. return cpuid_edx(0x00000001) & (1 << 12) ? true : false;
  195. }
  196. static int build_vendor_name(char *vendor_name)
  197. {
  198. struct cpuid_result result;
  199. result = cpuid(0x00000000);
  200. unsigned int *name_as_ints = (unsigned int *)vendor_name;
  201. name_as_ints[0] = result.ebx;
  202. name_as_ints[1] = result.edx;
  203. name_as_ints[2] = result.ecx;
  204. return result.eax;
  205. }
/*
 * identify_cpu() - determine the CPU vendor id and raw device id
 *
 * Fills @cpu with an X86_VENDOR_* id and a device id. On CPUs without
 * CPUID the device id is synthesized (486 vs 386 via the EFLAGS AC bit)
 * and the vendor is probed with the Cyrix and NexGen tests above; with
 * CPUID, the vendor string and leaf 1 EAX are used directly.
 */
static void identify_cpu(struct cpu_device_id *cpu)
{
	char vendor_name[16];
	int i;

	vendor_name[0] = '\0'; /* Unset */
	cpu->device = 0; /* fix gcc 4.4.4 warning */

	/* Find the id and vendor_name */
	if (!has_cpuid()) {
		/* Its a 486 if we can modify the AC flag */
		if (flag_is_changeable_p(X86_EFLAGS_AC))
			cpu->device = 0x00000400; /* 486 */
		else
			cpu->device = 0x00000300; /* 386 */
		if ((cpu->device == 0x00000400) && test_cyrix_52div()) {
			memcpy(vendor_name, "CyrixInstead", 13);
			/* If we ever care we can enable cpuid here */
		}
		/* Detect NexGen with old hypercode */
		else if (deep_magic_nexgen_probe())
			memcpy(vendor_name, "NexGenDriven", 13);
	}
	if (has_cpuid()) {
		int cpuid_level;

		cpuid_level = build_vendor_name(vendor_name);
		vendor_name[12] = '\0';

		/* Intel-defined flags: level 0x00000001 */
		if (cpuid_level >= 0x00000001) {
			cpu->device = cpuid_eax(0x00000001);
		} else {
			/* Have CPUID level 0 only unheard of */
			cpu->device = 0x00000400;
		}
	}

	/* Map the 12-byte vendor string to a normalized X86_VENDOR_* id */
	cpu->vendor = X86_VENDOR_UNKNOWN;
	for (i = 0; i < ARRAY_SIZE(x86_vendors); i++) {
		if (memcmp(vendor_name, x86_vendors[i].name, 12) == 0) {
			cpu->vendor = x86_vendors[i].vendor;
			break;
		}
	}
}
  247. static inline void get_fms(struct cpuinfo_x86 *c, uint32_t tfms)
  248. {
  249. c->x86 = (tfms >> 8) & 0xf;
  250. c->x86_model = (tfms >> 4) & 0xf;
  251. c->x86_mask = tfms & 0xf;
  252. if (c->x86 == 0xf)
  253. c->x86 += (tfms >> 20) & 0xff;
  254. if (c->x86 >= 0x6)
  255. c->x86_model += ((tfms >> 16) & 0xF) << 4;
  256. }
/*
 * x86_cpu_init_f() - early CPU setup: FPU init and CPU identification
 *
 * Initialises the FPU with fninit, clears CR0.EM (no x87 emulation) and
 * sets CR0.MP and CR0.NE, then (if CPUID is available) decodes the CPU
 * identity into the gd->arch fields.
 *
 * @return 0 always
 */
int x86_cpu_init_f(void)
{
	const u32 em_rst = ~X86_CR0_EM;
	const u32 mp_ne_set = X86_CR0_MP | X86_CR0_NE;

	/* initialize FPU, reset EM, set MP and NE */
	asm ("fninit\n" \
	     "movl %%cr0, %%eax\n" \
	     "andl %0, %%eax\n" \
	     "orl %1, %%eax\n" \
	     "movl %%eax, %%cr0\n" \
	     : : "i" (em_rst), "i" (mp_ne_set) : "eax");

	/* identify CPU via cpuid and store the decoded info into gd->arch */
	if (has_cpuid()) {
		struct cpu_device_id cpu;
		struct cpuinfo_x86 c;

		identify_cpu(&cpu);
		get_fms(&c, cpu.device);
		gd->arch.x86 = c.x86;
		gd->arch.x86_vendor = cpu.vendor;
		gd->arch.x86_model = c.x86_model;
		gd->arch.x86_mask = c.x86_mask;
		gd->arch.x86_device = cpu.device;
		gd->arch.has_mtrr = has_mtrr();
	}

	return 0;
}
  283. void x86_enable_caches(void)
  284. {
  285. unsigned long cr0;
  286. cr0 = read_cr0();
  287. cr0 &= ~(X86_CR0_NW | X86_CR0_CD);
  288. write_cr0(cr0);
  289. wbinvd();
  290. }
  291. void enable_caches(void) __attribute__((weak, alias("x86_enable_caches")));
  292. void x86_disable_caches(void)
  293. {
  294. unsigned long cr0;
  295. cr0 = read_cr0();
  296. cr0 |= X86_CR0_NW | X86_CR0_CD;
  297. wbinvd();
  298. write_cr0(cr0);
  299. wbinvd();
  300. }
  301. void disable_caches(void) __attribute__((weak, alias("x86_disable_caches")));
/*
 * x86_init_cache() - default cache init: simply enable the caches
 *
 * @return 0 always
 */
int x86_init_cache(void)
{
	enable_caches();

	return 0;
}
int init_cache(void) __attribute__((weak, alias("x86_init_cache")));
/*
 * do_reset() - implement the 'reset' console command
 *
 * Prints a message, waits 50 ms (presumably so the message can reach
 * the console - confirm), disables interrupts and resets the CPU.
 * Does not return when the reset succeeds.
 */
int do_reset(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
	printf("resetting ...\n");

	/* wait 50 ms */
	udelay(50000);
	disable_interrupts();
	reset_cpu(0);

	/*NOTREACHED*/
	return 0;
}
/*
 * flush_cache() - write back and invalidate the entire cache
 *
 * @dummy1: unused; kept for the common flush_cache() API
 * @dummy2: unused
 */
void flush_cache(unsigned long dummy1, unsigned long dummy2)
{
	asm("wbinvd\n");
}
/*
 * generate_gpf() - deliberately raise a general protection fault
 *
 * Implemented in the top-level asm below: a far jump through selector
 * 0x70, for which no descriptor exists, faults immediately.
 */
void __attribute__ ((regparm(0))) generate_gpf(void);

/* segment 0x70 is an arbitrary segment which does not exist */
asm(".globl generate_gpf\n"
    ".hidden generate_gpf\n"
    ".type generate_gpf, @function\n"
    "generate_gpf:\n"
    "ljmp $0x70, $0x47114711\n");
/*
 * reset_cpu() - default (weak) CPU reset using an x86 triple fault
 *
 * Both the #GP and double-fault vectors are pointed at a routine that
 * itself faults, so the fault cascade escalates to a triple fault,
 * which resets the processor.
 *
 * @addr: unused
 */
__weak void reset_cpu(ulong addr)
{
	printf("Resetting using x86 Triple Fault\n");
	set_vector(13, generate_gpf);	/* general protection fault handler */
	set_vector(8, generate_gpf);	/* double fault handler */
	generate_gpf();			/* start the show */
}
  336. int dcache_status(void)
  337. {
  338. return !(read_cr0() & 0x40000000);
  339. }
/*
 * Define these functions to allow ehci-hcd to function.
 * They are no-ops here; per-range cache maintenance is not implemented
 * on this platform.
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
}

void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}
/* dcache_enable() - enable the cache via the generic enable hook */
void dcache_enable(void)
{
	enable_caches();
}

/* dcache_disable() - disable the cache via the generic disable hook */
void dcache_disable(void)
{
	disable_caches();
}

/* No separate instruction-cache control is implemented here */
void icache_enable(void)
{
}

void icache_disable(void)
{
}

/* icache_status() - the instruction cache is reported always enabled */
int icache_status(void)
{
	return 1;
}
/*
 * cpu_enable_paging_pae() - turn on PAE paging
 *
 * @cr3: address of the top-level page table, loaded into CR3
 */
void cpu_enable_paging_pae(ulong cr3)
{
	__asm__ __volatile__(
		/* Load the page table address */
		"movl %0, %%cr3\n"
		/* Enable pae (CR4 bit 5, mask 0x20) */
		"movl %%cr4, %%eax\n"
		"orl $0x00000020, %%eax\n"
		"movl %%eax, %%cr4\n"
		/* Enable paging (CR0 bit 31, mask 0x80000000) */
		"movl %%cr0, %%eax\n"
		"orl $0x80000000, %%eax\n"
		"movl %%eax, %%cr0\n"
		:
		: "r" (cr3)
		: "eax");
}
/*
 * cpu_disable_paging_pae() - turn off paging and then PAE
 *
 * Reverses cpu_enable_paging_pae(): paging (CR0 bit 31) is disabled
 * first, then the PAE bit (CR4 bit 5) is cleared.
 */
void cpu_disable_paging_pae(void)
{
	/* Turn off paging */
	__asm__ __volatile__ (
		/* Disable paging */
		"movl %%cr0, %%eax\n"
		"andl $0x7fffffff, %%eax\n"
		"movl %%eax, %%cr0\n"
		/* Disable pae */
		"movl %%cr4, %%eax\n"
		"andl $0xffffffdf, %%eax\n"
		"movl %%eax, %%cr4\n"
		:
		:
		: "eax");
}
  398. static bool can_detect_long_mode(void)
  399. {
  400. return cpuid_eax(0x80000000) > 0x80000000UL;
  401. }
  402. static bool has_long_mode(void)
  403. {
  404. return cpuid_edx(0x80000001) & (1 << 29) ? true : false;
  405. }
  406. int cpu_has_64bit(void)
  407. {
  408. return has_cpuid() && can_detect_long_mode() &&
  409. has_long_mode();
  410. }
  411. const char *cpu_vendor_name(int vendor)
  412. {
  413. const char *name;
  414. name = "<invalid cpu vendor>";
  415. if ((vendor < (ARRAY_SIZE(x86_vendor_name))) &&
  416. (x86_vendor_name[vendor] != 0))
  417. name = x86_vendor_name[vendor];
  418. return name;
  419. }
/*
 * cpu_get_name() - read the processor brand string via CPUID
 *
 * @name: buffer of at least CPU_MAX_NAME_LEN bytes to receive the
 *        brand string
 * @return pointer into @name, past any leading spaces
 */
char *cpu_get_name(char *name)
{
	unsigned int *name_as_ints = (unsigned int *)name;
	struct cpuid_result regs;
	char *ptr;
	int i;

	/*
	 * This bit adds up to 48 bytes: leaves 0x80000002..0x80000004
	 * each return 16 bytes of the brand string in EAX..EDX.
	 */
	for (i = 0; i < 3; i++) {
		regs = cpuid(0x80000002 + i);
		name_as_ints[i * 4 + 0] = regs.eax;
		name_as_ints[i * 4 + 1] = regs.ebx;
		name_as_ints[i * 4 + 2] = regs.ecx;
		name_as_ints[i * 4 + 3] = regs.edx;
	}

	/* Force termination in case the hardware did not NUL-pad */
	name[CPU_MAX_NAME_LEN - 1] = '\0';

	/* Skip leading spaces. */
	ptr = name;
	while (*ptr == ' ')
		ptr++;

	return ptr;
}
  441. int default_print_cpuinfo(void)
  442. {
  443. printf("CPU: %s, vendor %s, device %xh\n",
  444. cpu_has_64bit() ? "x86_64" : "x86",
  445. cpu_vendor_name(gd->arch.x86_vendor), gd->arch.x86_device);
  446. return 0;
  447. }
  448. #define PAGETABLE_SIZE (6 * 4096)
  449. /**
  450. * build_pagetable() - build a flat 4GiB page table structure for 64-bti mode
  451. *
  452. * @pgtable: Pointer to a 24iKB block of memory
  453. */
  454. static void build_pagetable(uint32_t *pgtable)
  455. {
  456. uint i;
  457. memset(pgtable, '\0', PAGETABLE_SIZE);
  458. /* Level 4 needs a single entry */
  459. pgtable[0] = (uint32_t)&pgtable[1024] + 7;
  460. /* Level 3 has one 64-bit entry for each GiB of memory */
  461. for (i = 0; i < 4; i++) {
  462. pgtable[1024 + i * 2] = (uint32_t)&pgtable[2048] +
  463. 0x1000 * i + 7;
  464. }
  465. /* Level 2 has 2048 64-bit entries, each repesenting 2MiB */
  466. for (i = 0; i < 2048; i++)
  467. pgtable[2048 + i * 2] = 0x183 + (i << 21UL);
  468. }
/*
 * cpu_jump_to_64bit() - switch the CPU to 64-bit mode and jump to target
 *
 * Builds a flat 4 GiB page table and hands over to cpu_call64() to
 * perform the mode switch and jump.
 *
 * @setup_base: passed through to cpu_call64()
 * @target:     address to jump to in 64-bit mode
 * @return -ENOMEM if the page table cannot be allocated; -EFAULT if
 *         cpu_call64() unexpectedly returns
 */
int cpu_jump_to_64bit(ulong setup_base, ulong target)
{
	uint32_t *pgtable;

	/* Page tables must be 4 KiB aligned */
	pgtable = memalign(4096, PAGETABLE_SIZE);
	if (!pgtable)
		return -ENOMEM;

	build_pagetable(pgtable);
	cpu_call64((ulong)pgtable, setup_base, target);
	free(pgtable);

	return -EFAULT;
}
/*
 * show_boot_progress() - emit a POST code to the POST I/O port
 *
 * When MIN_PORT80_KCLOCKS_DELAY is set, successive codes are spaced
 * out in time using the TSC so they remain visible on a POST card.
 *
 * @val: POST code to write
 */
void show_boot_progress(int val)
{
#if MIN_PORT80_KCLOCKS_DELAY
	/*
	 * Scale the time counter reading to avoid using 64 bit
	 * arithmetics. Can't use get_timer() here because it could be
	 * not yet initialized or even implemented.
	 */
	if (!gd->arch.tsc_prev) {
		gd->arch.tsc_base_kclocks = rdtsc() / 1000;
		/*
		 * NOTE(review): tsc_prev is left at 0 here, so the next
		 * call takes this init branch again and the delay loop
		 * below only runs once tsc_prev becomes non-zero —
		 * confirm this is the intended behaviour.
		 */
		gd->arch.tsc_prev = 0;
	} else {
		uint32_t now;

		/* Busy-wait until the minimum spacing has elapsed */
		do {
			now = rdtsc() / 1000 - gd->arch.tsc_base_kclocks;
		} while (now < (gd->arch.tsc_prev + MIN_PORT80_KCLOCKS_DELAY));
		gd->arch.tsc_prev = now;
	}
#endif
	outb(val, POST_PORT);
}