cpu.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2016 Google, Inc
  4. *
  5. * Based on code from coreboot src/soc/intel/broadwell/cpu.c
  6. */
  7. #include <common.h>
  8. #include <dm.h>
  9. #include <cpu.h>
  10. #include <asm/cpu.h>
  11. #include <asm/cpu_x86.h>
  12. #include <asm/cpu_common.h>
  13. #include <asm/intel_regs.h>
  14. #include <asm/msr.h>
  15. #include <asm/post.h>
  16. #include <asm/turbo.h>
  17. #include <asm/arch/cpu.h>
  18. #include <asm/arch/pch.h>
  19. #include <asm/arch/rcb.h>
/* Per-device private data for the Broadwell CPU driver */
struct cpu_broadwell_priv {
	bool ht_disabled;	/* true when thread count == core count, i.e. no HT */
};
/*
 * Convert time in seconds to POWER_LIMIT_1_TIME MSR value
 *
 * Sparse lookup table: only the listed second values have an encoding;
 * any index not listed below is implicitly 0x00 (C designated-initializer
 * semantics). Callers must clamp the index to the table size first.
 */
static const u8 power_limit_time_sec_to_msr[] = {
	[0] = 0x00,
	[1] = 0x0a,
	[2] = 0x0b,
	[3] = 0x4b,
	[4] = 0x0c,
	[5] = 0x2c,
	[6] = 0x4c,
	[7] = 0x6c,
	[8] = 0x0d,
	[10] = 0x2d,
	[12] = 0x4d,
	[14] = 0x6d,
	[16] = 0x0e,
	[20] = 0x2e,
	[24] = 0x4e,
	[28] = 0x6e,
	[32] = 0x0f,
	[40] = 0x2f,
	[48] = 0x4f,
	[56] = 0x6f,
	[64] = 0x10,
	[80] = 0x30,
	[96] = 0x50,
	[112] = 0x70,
	[128] = 0x11,
};
/*
 * Convert POWER_LIMIT_1_TIME MSR value to seconds
 *
 * Inverse of power_limit_time_sec_to_msr[]; MSR encodings not listed
 * below map to 0 seconds.
 */
static const u8 power_limit_time_msr_to_sec[] = {
	[0x00] = 0,
	[0x0a] = 1,
	[0x0b] = 2,
	[0x4b] = 3,
	[0x0c] = 4,
	[0x2c] = 5,
	[0x4c] = 6,
	[0x6c] = 7,
	[0x0d] = 8,
	[0x2d] = 10,
	[0x4d] = 12,
	[0x6d] = 14,
	[0x0e] = 16,
	[0x2e] = 20,
	[0x4e] = 24,
	[0x6e] = 28,
	[0x0f] = 32,
	[0x2f] = 40,
	[0x4f] = 48,
	[0x6f] = 56,
	[0x10] = 64,
	[0x30] = 80,
	[0x50] = 96,
	[0x70] = 112,
	[0x11] = 128,
};
  79. int arch_cpu_init_dm(void)
  80. {
  81. struct udevice *dev;
  82. int ret;
  83. /* Start up the LPC so we have serial */
  84. ret = uclass_first_device(UCLASS_LPC, &dev);
  85. if (ret)
  86. return ret;
  87. if (!dev)
  88. return -ENODEV;
  89. ret = cpu_set_flex_ratio_to_tdp_nominal();
  90. if (ret)
  91. return ret;
  92. return 0;
  93. }
  94. void set_max_freq(void)
  95. {
  96. msr_t msr, perf_ctl, platform_info;
  97. /* Check for configurable TDP option */
  98. platform_info = msr_read(MSR_PLATFORM_INFO);
  99. if ((platform_info.hi >> 1) & 3) {
  100. /* Set to nominal TDP ratio */
  101. msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
  102. perf_ctl.lo = (msr.lo & 0xff) << 8;
  103. } else {
  104. /* Platform Info bits 15:8 give max ratio */
  105. msr = msr_read(MSR_PLATFORM_INFO);
  106. perf_ctl.lo = msr.lo & 0xff00;
  107. }
  108. perf_ctl.hi = 0;
  109. msr_write(IA32_PERF_CTL, perf_ctl);
  110. debug("CPU: frequency set to %d MHz\n",
  111. ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
  112. }
/* Early CPU init: emit a POST progress code, then run common x86 setup */
int arch_cpu_init(void)
{
	post_code(POST_CPU_INIT);

	return x86_cpu_init_f();
}
  118. int checkcpu(void)
  119. {
  120. int ret;
  121. set_max_freq();
  122. ret = cpu_common_init();
  123. if (ret)
  124. return ret;
  125. gd->arch.pei_boot_mode = PEI_BOOT_NONE;
  126. return 0;
  127. }
  128. int print_cpuinfo(void)
  129. {
  130. char processor_name[CPU_MAX_NAME_LEN];
  131. const char *name;
  132. /* Print processor name */
  133. name = cpu_get_name(processor_name);
  134. printf("CPU: %s\n", name);
  135. return 0;
  136. }
  137. /*
  138. * The core 100MHz BLCK is disabled in deeper c-states. One needs to calibrate
  139. * the 100MHz BCLCK against the 24MHz BLCK to restore the clocks properly
  140. * when a core is woken up
  141. */
  142. static int pcode_ready(void)
  143. {
  144. int wait_count;
  145. const int delay_step = 10;
  146. wait_count = 0;
  147. do {
  148. if (!(readl(MCHBAR_REG(BIOS_MAILBOX_INTERFACE)) &
  149. MAILBOX_RUN_BUSY))
  150. return 0;
  151. wait_count += delay_step;
  152. udelay(delay_step);
  153. } while (wait_count < 1000);
  154. return -ETIMEDOUT;
  155. }
/*
 * Issue a mailbox command to PCODE and read back the data register
 *
 * @command: mailbox command to send (RUN_BUSY is OR'd in here)
 * Return: contents of BIOS_MAILBOX_DATA on success
 *
 * NOTE(review): return type is u32 but both timeout paths return the
 * negative value from pcode_ready(), which a caller cannot distinguish
 * from large valid data - confirm callers tolerate this.
 */
static u32 pcode_mailbox_read(u32 command)
{
	int ret;

	ret = pcode_ready();
	if (ret) {
		debug("PCODE: mailbox timeout on wait ready\n");
		return ret;
	}

	/* Send command and start transaction */
	writel(command | MAILBOX_RUN_BUSY, MCHBAR_REG(BIOS_MAILBOX_INTERFACE));

	ret = pcode_ready();
	if (ret) {
		debug("PCODE: mailbox timeout on completion\n");
		return ret;
	}

	/* Read mailbox */
	return readl(MCHBAR_REG(BIOS_MAILBOX_DATA));
}
  174. static int pcode_mailbox_write(u32 command, u32 data)
  175. {
  176. int ret;
  177. ret = pcode_ready();
  178. if (ret) {
  179. debug("PCODE: mailbox timeout on wait ready\n");
  180. return ret;
  181. }
  182. writel(data, MCHBAR_REG(BIOS_MAILBOX_DATA));
  183. /* Send command and start transaction */
  184. writel(command | MAILBOX_RUN_BUSY, MCHBAR_REG(BIOS_MAILBOX_INTERFACE));
  185. ret = pcode_ready();
  186. if (ret) {
  187. debug("PCODE: mailbox timeout on completion\n");
  188. return ret;
  189. }
  190. return 0;
  191. }
/*
 * Program the voltage-regulator configuration MSRs
 *
 * @dev is the CPU device; it supplies the optional "intel,slow-ramp" and
 * "intel,min-vid" device-tree properties. All bit positions below are in
 * 64-bit MSR terms, hence the "- 32" adjustments when writing msr.hi.
 */
static void initialize_vr_config(struct udevice *dev)
{
	int ramp, min_vid;
	msr_t msr;

	debug("Initializing VR config\n");

	/* Configure VR_CURRENT_CONFIG */
	msr = msr_read(MSR_VR_CURRENT_CONFIG);
	/*
	 * Preserve bits 63 and 62. Bit 62 is PSI4 enable, but it is only valid
	 * on ULT systems
	 */
	msr.hi &= 0xc0000000;
	msr.hi |= (0x01 << (52 - 32));	/* PSI3 threshold - 1A */
	msr.hi |= (0x05 << (42 - 32));	/* PSI2 threshold - 5A */
	msr.hi |= (0x14 << (32 - 32));	/* PSI1 threshold - 20A */
	msr.hi |= (1 << (62 - 32));	/* Enable PSI4 */
	/* Leave the max instantaneous current limit (12:0) to default */
	msr_write(MSR_VR_CURRENT_CONFIG, msr);

	/* Configure VR_MISC_CONFIG MSR */
	msr = msr_read(MSR_VR_MISC_CONFIG);
	/* Set the IOUT_SLOPE scalar applied to dIout in U10.1.9 format */
	msr.hi &= ~(0x3ff << (40 - 32));
	msr.hi |= (0x200 << (40 - 32));	/* 1.0 */
	/* Set IOUT_OFFSET to 0 */
	msr.hi &= ~0xff;
	/* Set entry ramp rate to slow */
	msr.hi &= ~(1 << (51 - 32));
	/* Enable decay mode on C-state entry */
	msr.hi |= (1 << (52 - 32));
	/* Set the slow ramp rate */
	msr.hi &= ~(0x3 << (53 - 32));

	/*
	 * Configure the C-state exit ramp rate: use the device-tree value
	 * if present, else default to fast ramp / 4
	 */
	ramp = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
			      "intel,slow-ramp", -1);
	if (ramp != -1) {
		/* Configured slow ramp rate */
		msr.hi |= ((ramp & 0x3) << (53 - 32));
		/* Set exit ramp rate to slow */
		msr.hi &= ~(1 << (50 - 32));
	} else {
		/* Fast ramp rate / 4 */
		msr.hi |= (0x01 << (53 - 32));
		/* Set exit ramp rate to fast */
		msr.hi |= (1 << (50 - 32));
	}

	/* Set MIN_VID (31:24) to allow CPU to have full control */
	msr.lo &= ~0xff000000;
	min_vid = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
				 "intel,min-vid", 0);
	msr.lo |= (min_vid & 0xff) << 24;
	msr_write(MSR_VR_MISC_CONFIG, msr);

	/* Configure VR_MISC_CONFIG2 MSR */
	msr = msr_read(MSR_VR_MISC_CONFIG2);
	msr.lo &= ~0xffff;
	/*
	 * Allow CPU to control minimum voltage completely (15:8) and
	 * set the fast ramp voltage in 10mV steps
	 */
	if (cpu_get_family_model() == BROADWELL_FAMILY_ULT)
		msr.lo |= 0x006a;	/* 1.56V */
	else
		msr.lo |= 0x006f;	/* 1.60V */
	msr_write(MSR_VR_MISC_CONFIG2, msr);

	/* Set C9/C10 VCC Min */
	pcode_mailbox_write(MAILBOX_BIOS_CMD_WRITE_C9C10_VOLTAGE, 0x1f1f);
}
/*
 * Trigger the PCODE 24MHz BCLK calibration and log the result
 *
 * Return: 0 on success, -ETIMEDOUT if the mailbox never becomes ready
 */
static int calibrate_24mhz_bclk(void)
{
	int err_code;
	int ret;

	ret = pcode_ready();
	if (ret)
		return ret;

	/* A non-zero value initiates the PCODE calibration */
	writel(~0, MCHBAR_REG(BIOS_MAILBOX_DATA));
	writel(MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_FSM_MEASURE_INTVL,
	       MCHBAR_REG(BIOS_MAILBOX_INTERFACE));

	ret = pcode_ready();
	if (ret)
		return ret;

	/* Low byte of the interface register holds the response code */
	err_code = readl(MCHBAR_REG(BIOS_MAILBOX_INTERFACE)) & 0xff;
	debug("PCODE: 24MHz BLCK calibration response: %d\n", err_code);

	/* Read the calibrated value */
	writel(MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_READ_CALIBRATION,
	       MCHBAR_REG(BIOS_MAILBOX_INTERFACE));

	ret = pcode_ready();
	if (ret)
		return ret;

	debug("PCODE: 24MHz BLCK calibration value: 0x%08x\n",
	      readl(MCHBAR_REG(BIOS_MAILBOX_DATA)));

	return 0;
}
/*
 * Copy the PCH power-limit levels reported by PCODE into the PMSYNC
 * config registers, repacking each 6-bit mailbox field into a 5-bit
 * PMSYNC field.
 */
static void configure_pch_power_sharing(void)
{
	u32 pch_power, pch_power_ext, pmsync, pmsync2;
	int i;

	/* Read PCH Power levels from PCODE */
	pch_power = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER);
	pch_power_ext = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER_EXT);

	debug("PCH Power: PCODE Levels 0x%08x 0x%08x\n", pch_power,
	      pch_power_ext);

	pmsync = readl(RCB_REG(PMSYNC_CONFIG));
	pmsync2 = readl(RCB_REG(PMSYNC_CONFIG2));

	/*
	 * Program PMSYNC_TPR_CONFIG PCH power limit values
	 * pmsync[0:4] = mailbox[0:5]
	 * pmsync[8:12] = mailbox[6:11]
	 * pmsync[16:20] = mailbox[12:17]
	 */
	for (i = 0; i < 3; i++) {
		u32 level = pch_power & 0x3f;

		pch_power >>= 6;
		pmsync &= ~(0x1f << (i * 8));
		pmsync |= (level & 0x1f) << (i * 8);
	}
	writel(pmsync, RCB_REG(PMSYNC_CONFIG));

	/*
	 * Program PMSYNC_TPR_CONFIG2 Extended PCH power limit values
	 * pmsync2[0:4] = mailbox[23:18]  (remaining bits of pch_power)
	 * pmsync2[8:12] = mailbox_ext[6:11]
	 * pmsync2[16:20] = mailbox_ext[12:17]
	 * pmsync2[24:28] = mailbox_ext[18:22]
	 */
	pmsync2 &= ~0x1f;
	pmsync2 |= pch_power & 0x1f;

	for (i = 1; i < 4; i++) {
		u32 level = pch_power_ext & 0x3f;

		pch_power_ext >>= 6;
		pmsync2 &= ~(0x1f << (i * 8));
		pmsync2 |= (level & 0x1f) << (i * 8);
	}
	writel(pmsync2, RCB_REG(PMSYNC_CONFIG2));
}
  326. static int bsp_init_before_ap_bringup(struct udevice *dev)
  327. {
  328. int ret;
  329. initialize_vr_config(dev);
  330. ret = calibrate_24mhz_bclk();
  331. if (ret)
  332. return ret;
  333. configure_pch_power_sharing();
  334. return 0;
  335. }
  336. int cpu_config_tdp_levels(void)
  337. {
  338. msr_t platform_info;
  339. /* Bits 34:33 indicate how many levels supported */
  340. platform_info = msr_read(MSR_PLATFORM_INFO);
  341. return (platform_info.hi >> 1) & 3;
  342. }
  343. static void set_max_ratio(void)
  344. {
  345. msr_t msr, perf_ctl;
  346. perf_ctl.hi = 0;
  347. /* Check for configurable TDP option */
  348. if (turbo_get_state() == TURBO_ENABLED) {
  349. msr = msr_read(MSR_NHM_TURBO_RATIO_LIMIT);
  350. perf_ctl.lo = (msr.lo & 0xff) << 8;
  351. } else if (cpu_config_tdp_levels()) {
  352. /* Set to nominal TDP ratio */
  353. msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
  354. perf_ctl.lo = (msr.lo & 0xff) << 8;
  355. } else {
  356. /* Platform Info bits 15:8 give max ratio */
  357. msr = msr_read(MSR_PLATFORM_INFO);
  358. perf_ctl.lo = msr.lo & 0xff00;
  359. }
  360. msr_write(IA32_PERF_CTL, perf_ctl);
  361. debug("cpu: frequency set to %d\n",
  362. ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
  363. }
  364. int broadwell_init(struct udevice *dev)
  365. {
  366. struct cpu_broadwell_priv *priv = dev_get_priv(dev);
  367. int num_threads;
  368. int num_cores;
  369. msr_t msr;
  370. int ret;
  371. msr = msr_read(CORE_THREAD_COUNT_MSR);
  372. num_threads = (msr.lo >> 0) & 0xffff;
  373. num_cores = (msr.lo >> 16) & 0xffff;
  374. debug("CPU has %u cores, %u threads enabled\n", num_cores,
  375. num_threads);
  376. priv->ht_disabled = num_threads == num_cores;
  377. ret = bsp_init_before_ap_bringup(dev);
  378. if (ret)
  379. return ret;
  380. set_max_ratio();
  381. return ret;
  382. }
  383. static void configure_mca(void)
  384. {
  385. msr_t msr;
  386. const unsigned int mcg_cap_msr = 0x179;
  387. int i;
  388. int num_banks;
  389. msr = msr_read(mcg_cap_msr);
  390. num_banks = msr.lo & 0xff;
  391. msr.lo = 0;
  392. msr.hi = 0;
  393. /*
  394. * TODO(adurbin): This should only be done on a cold boot. Also, some
  395. * of these banks are core vs package scope. For now every CPU clears
  396. * every bank
  397. */
  398. for (i = 0; i < num_banks; i++)
  399. msr_write(MSR_IA32_MC0_STATUS + (i * 4), msr);
  400. }
/*
 * Enable local APIC TPR update messages by clearing bit 10 of
 * MSR_PIC_MSG_CONTROL (per the original comment, the bit acts as a
 * disable flag when set).
 */
static void enable_lapic_tpr(void)
{
	msr_t msr;

	msr = msr_read(MSR_PIC_MSG_CONTROL);
	msr.lo &= ~(1 << 10);	/* Enable APIC TPR updates */
	msr_write(MSR_PIC_MSG_CONTROL, msr);
}
  408. static void configure_c_states(void)
  409. {
  410. msr_t msr;
  411. msr = msr_read(MSR_PMG_CST_CONFIG_CONTROL);
  412. msr.lo |= (1 << 31); /* Timed MWAIT Enable */
  413. msr.lo |= (1 << 30); /* Package c-state Undemotion Enable */
  414. msr.lo |= (1 << 29); /* Package c-state Demotion Enable */
  415. msr.lo |= (1 << 28); /* C1 Auto Undemotion Enable */
  416. msr.lo |= (1 << 27); /* C3 Auto Undemotion Enable */
  417. msr.lo |= (1 << 26); /* C1 Auto Demotion Enable */
  418. msr.lo |= (1 << 25); /* C3 Auto Demotion Enable */
  419. msr.lo &= ~(1 << 10); /* Disable IO MWAIT redirection */
  420. /* The deepest package c-state defaults to factory-configured value */
  421. msr_write(MSR_PMG_CST_CONFIG_CONTROL, msr);
  422. msr = msr_read(MSR_MISC_PWR_MGMT);
  423. msr.lo &= ~(1 << 0); /* Enable P-state HW_ALL coordination */
  424. msr_write(MSR_MISC_PWR_MGMT, msr);
  425. msr = msr_read(MSR_POWER_CTL);
  426. msr.lo |= (1 << 18); /* Enable Energy Perf Bias MSR 0x1b0 */
  427. msr.lo |= (1 << 1); /* C1E Enable */
  428. msr.lo |= (1 << 0); /* Bi-directional PROCHOT# */
  429. msr_write(MSR_POWER_CTL, msr);
  430. /* C-state Interrupt Response Latency Control 0 - package C3 latency */
  431. msr.hi = 0;
  432. msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_0_LIMIT;
  433. msr_write(MSR_C_STATE_LATENCY_CONTROL_0, msr);
  434. /* C-state Interrupt Response Latency Control 1 */
  435. msr.hi = 0;
  436. msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_1_LIMIT;
  437. msr_write(MSR_C_STATE_LATENCY_CONTROL_1, msr);
  438. /* C-state Interrupt Response Latency Control 2 - package C6/C7 short */
  439. msr.hi = 0;
  440. msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_2_LIMIT;
  441. msr_write(MSR_C_STATE_LATENCY_CONTROL_2, msr);
  442. /* C-state Interrupt Response Latency Control 3 - package C8 */
  443. msr.hi = 0;
  444. msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_3_LIMIT;
  445. msr_write(MSR_C_STATE_LATENCY_CONTROL_3, msr);
  446. /* C-state Interrupt Response Latency Control 4 - package C9 */
  447. msr.hi = 0;
  448. msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_4_LIMIT;
  449. msr_write(MSR_C_STATE_LATENCY_CONTROL_4, msr);
  450. /* C-state Interrupt Response Latency Control 5 - package C10 */
  451. msr.hi = 0;
  452. msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_5_LIMIT;
  453. msr_write(MSR_C_STATE_LATENCY_CONTROL_5, msr);
  454. }
  455. static void configure_misc(void)
  456. {
  457. msr_t msr;
  458. msr = msr_read(MSR_IA32_MISC_ENABLE);
  459. msr.lo |= (1 << 0); /* Fast String enable */
  460. msr.lo |= (1 << 3); /* TM1/TM2/EMTTM enable */
  461. msr.lo |= (1 << 16); /* Enhanced SpeedStep Enable */
  462. msr_write(MSR_IA32_MISC_ENABLE, msr);
  463. /* Disable thermal interrupts */
  464. msr.lo = 0;
  465. msr.hi = 0;
  466. msr_write(MSR_IA32_THERM_INTERRUPT, msr);
  467. /* Enable package critical interrupt only */
  468. msr.lo = 1 << 4;
  469. msr.hi = 0;
  470. msr_write(MSR_IA32_PACKAGE_THERM_INTERRUPT, msr);
  471. }
/*
 * Program the TCC (thermal control circuit) activation offset from the
 * optional "intel,tcc-offset" device-tree property, when the part
 * advertises support via MSR_PLATFORM_INFO bit 30.
 */
static void configure_thermal_target(struct udevice *dev)
{
	int tcc_offset;
	msr_t msr;

	tcc_offset = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
				    "intel,tcc-offset", 0);

	/* Set TCC activation offset if supported */
	msr = msr_read(MSR_PLATFORM_INFO);
	if ((msr.lo & (1 << 30)) && tcc_offset) {
		msr = msr_read(MSR_TEMPERATURE_TARGET);
		msr.lo &= ~(0xf << 24);	/* Bits 27:24 */
		msr.lo |= (tcc_offset & 0xf) << 24;
		msr_write(MSR_TEMPERATURE_TARGET, msr);
	}
}
  487. static void configure_dca_cap(void)
  488. {
  489. struct cpuid_result cpuid_regs;
  490. msr_t msr;
  491. /* Check feature flag in CPUID.(EAX=1):ECX[18]==1 */
  492. cpuid_regs = cpuid(1);
  493. if (cpuid_regs.ecx & (1 << 18)) {
  494. msr = msr_read(MSR_IA32_PLATFORM_DCA_CAP);
  495. msr.lo |= 1;
  496. msr_write(MSR_IA32_PLATFORM_DCA_CAP, msr);
  497. }
  498. }
  499. static void set_energy_perf_bias(u8 policy)
  500. {
  501. msr_t msr;
  502. int ecx;
  503. /* Determine if energy efficient policy is supported */
  504. ecx = cpuid_ecx(0x6);
  505. if (!(ecx & (1 << 3)))
  506. return;
  507. /* Energy Policy is bits 3:0 */
  508. msr = msr_read(MSR_IA32_ENERGY_PERFORMANCE_BIAS);
  509. msr.lo &= ~0xf;
  510. msr.lo |= policy & 0xf;
  511. msr_write(MSR_IA32_ENERGY_PERFORMANCE_BIAS, msr);
  512. debug("cpu: energy policy set to %u\n", policy);
  513. }
/*
 * Per-core init sequence; all CPUs including the BSP run this.
 * The steps are kept in the original order - several program MSRs whose
 * effects the later steps build on.
 */
static void cpu_core_init(struct udevice *dev)
{
	/* Clear out pending MCEs */
	configure_mca();

	/* Enable the local cpu apics */
	enable_lapic_tpr();

	/* Configure C States */
	configure_c_states();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	/* Thermal throttle activation offset */
	configure_thermal_target(dev);

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	/* Enable Turbo */
	turbo_enable();
}
  534. /*
  535. * Configure processor power limits if possible
  536. * This must be done AFTER set of BIOS_RESET_CPL
  537. */
  538. void cpu_set_power_limits(int power_limit_1_time)
  539. {
  540. msr_t msr;
  541. msr_t limit;
  542. unsigned power_unit;
  543. unsigned tdp, min_power, max_power, max_time;
  544. u8 power_limit_1_val;
  545. msr = msr_read(MSR_PLATFORM_INFO);
  546. if (power_limit_1_time > ARRAY_SIZE(power_limit_time_sec_to_msr))
  547. power_limit_1_time = 28;
  548. if (!(msr.lo & PLATFORM_INFO_SET_TDP))
  549. return;
  550. /* Get units */
  551. msr = msr_read(MSR_PKG_POWER_SKU_UNIT);
  552. power_unit = 2 << ((msr.lo & 0xf) - 1);
  553. /* Get power defaults for this SKU */
  554. msr = msr_read(MSR_PKG_POWER_SKU);
  555. tdp = msr.lo & 0x7fff;
  556. min_power = (msr.lo >> 16) & 0x7fff;
  557. max_power = msr.hi & 0x7fff;
  558. max_time = (msr.hi >> 16) & 0x7f;
  559. debug("CPU TDP: %u Watts\n", tdp / power_unit);
  560. if (power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
  561. power_limit_1_time = power_limit_time_msr_to_sec[max_time];
  562. if (min_power > 0 && tdp < min_power)
  563. tdp = min_power;
  564. if (max_power > 0 && tdp > max_power)
  565. tdp = max_power;
  566. power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];
  567. /* Set long term power limit to TDP */
  568. limit.lo = 0;
  569. limit.lo |= tdp & PKG_POWER_LIMIT_MASK;
  570. limit.lo |= PKG_POWER_LIMIT_EN;
  571. limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
  572. PKG_POWER_LIMIT_TIME_SHIFT;
  573. /* Set short term power limit to 1.25 * TDP */
  574. limit.hi = 0;
  575. limit.hi |= ((tdp * 125) / 100) & PKG_POWER_LIMIT_MASK;
  576. limit.hi |= PKG_POWER_LIMIT_EN;
  577. /* Power limit 2 time is only programmable on server SKU */
  578. msr_write(MSR_PKG_POWER_LIMIT, limit);
  579. /* Set power limit values in MCHBAR as well */
  580. writel(limit.lo, MCHBAR_REG(MCH_PKG_POWER_LIMIT_LO));
  581. writel(limit.hi, MCHBAR_REG(MCH_PKG_POWER_LIMIT_HI));
  582. /* Set DDR RAPL power limit by copying from MMIO to MSR */
  583. msr.lo = readl(MCHBAR_REG(MCH_DDR_POWER_LIMIT_LO));
  584. msr.hi = readl(MCHBAR_REG(MCH_DDR_POWER_LIMIT_HI));
  585. msr_write(MSR_DDR_RAPL_LIMIT, msr);
  586. /* Use nominal TDP values for CPUs with configurable TDP */
  587. if (cpu_config_tdp_levels()) {
  588. msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
  589. limit.hi = 0;
  590. limit.lo = msr.lo & 0xff;
  591. msr_write(MSR_TURBO_ACTIVATION_RATIO, limit);
  592. }
  593. }
  594. static int broadwell_get_info(struct udevice *dev, struct cpu_info *info)
  595. {
  596. msr_t msr;
  597. msr = msr_read(IA32_PERF_CTL);
  598. info->cpu_freq = ((msr.lo >> 8) & 0xff) * BROADWELL_BCLK * 1000000;
  599. info->features = 1 << CPU_FEAT_L1_CACHE | 1 << CPU_FEAT_MMU |
  600. 1 << CPU_FEAT_UCODE | 1 << CPU_FEAT_DEVICE_ID;
  601. return 0;
  602. }
/*
 * Return the number of CPUs
 *
 * NOTE(review): hard-coded to 4; presumably matches the supported SKU's
 * core/thread count - confirm against the target board.
 */
static int broadwell_get_count(struct udevice *dev)
{
	return 4;
}
  607. static int cpu_x86_broadwell_probe(struct udevice *dev)
  608. {
  609. if (dev->seq == 0) {
  610. cpu_core_init(dev);
  611. return broadwell_init(dev);
  612. }
  613. return 0;
  614. }
/* CPU uclass operations: generic x86 helpers plus Broadwell specifics */
static const struct cpu_ops cpu_x86_broadwell_ops = {
	.get_desc = cpu_x86_get_desc,
	.get_info = broadwell_get_info,
	.get_count = broadwell_get_count,
	.get_vendor = cpu_x86_get_vendor,
};

/* Device-tree compatible strings this driver binds against */
static const struct udevice_id cpu_x86_broadwell_ids[] = {
	{ .compatible = "intel,core-i3-gen5" },
	{ }
};

U_BOOT_DRIVER(cpu_x86_broadwell_drv) = {
	.name = "cpu_x86_broadwell",
	.id = UCLASS_CPU,
	.of_match = cpu_x86_broadwell_ids,
	.bind = cpu_x86_bind,
	.probe = cpu_x86_broadwell_probe,
	.ops = &cpu_x86_broadwell_ops,
	.priv_auto_alloc_size = sizeof(struct cpu_broadwell_priv),
	.flags = DM_FLAG_PRE_RELOC,	/* CPU is needed before relocation */
};