sequencer.c
/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"
static void scc_mgr_load_dqs_for_write_group(uint32_t write_group);

static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);

static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);

static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;

static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);

static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;

static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);

static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;

static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;
#define DELTA_D		1

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode,
 * where we skip everything except the bare minimum, we need a few of the
 * steps to be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS
 * for the check, which is based on the rtl-supplied value, or we dynamically
 * compute the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)
/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option instead of
 * static, we use boolean logic to select between non-skip and skip values.
 *
 * The mask is set to include all bits when not skipping, but is zero when
 * skipping.
 */
uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)
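
/*
 * Example (editor's sketch of the macro above): with skip_delay_mask set
 * to 0xffff (not skipping), SKIP_DELAY_LOOP_VALUE_OR_ZERO(250) evaluates
 * to 250; with skip_delay_mask set to 0 (skipping), the same expression
 * evaluates to 0, so every delay loop collapses to a zero count.
 */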

struct gbl_type *gbl;
struct param_type *param;
uint32_t curr_shadow_reg;

static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm,
	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);

static void set_failing_group_stage(uint32_t group, uint32_t stage,
	uint32_t substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group.
	 */
	if (gbl->error_stage == CAL_STAGE_NIL) {
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}
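
/*
 * Editor's note: the three helpers below update fields packed into the
 * single cur_stage register; the layout implied by the masks used here is
 * group in bits [31:16], substage in bits [15:8], stage in bits [7:0].
 */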
static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

static void initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* USER calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER memory clock is not stable, so we begin initialization */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER calibration status all set to zero */
	writel(0, &phy_mgr_cfg->cal_status);
	writel(0, &phy_mgr_cfg->cal_debug_info);

	if ((dyn_calib_steps & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) {
		param->read_correct_mask_vg = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->write_correct_mask_vg = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->read_correct_mask = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
		param->write_correct_mask = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
		param->dm_correct_mask = ((uint32_t)1 <<
			(RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH))
			- 1;
	}
}

static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
{
	uint32_t odt_mask_0 = 0;
	uint32_t odt_mask_1 = 0;
	uint32_t cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) {
		if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) {
			/*
			 * 1 Rank
			 * Read: ODT = 0
			 * Write: ODT = 1
			 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
		} else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) {
			/* 2 Ranks */
			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
				/*
				 * - Dual-Slot, Single-Rank
				 *   (1 chip-select per DIMM),
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM),
				 *   which means 2 DIMMs.
				 * Since MEM_NUMBER_OF_RANKS is 2, they are
				 * both single rank with 2 CS each
				 * (special for RDIMM).
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * USER - Single-Slot, Dual-Rank DIMMs
				 * (2 chip-selects per DIMM)
				 * USER Read: Turn off ODT on all ranks
				 * USER Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
		} else {
			/*
			 * 4 Ranks
			 * Read:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
		}
	} else {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	}

	cs_and_odt_mask =
		(0xFF & ~(1 << rank)) |
		((0xFF & odt_mask_0) << 8) |
		((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
	       RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}
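
/*
 * Worked example (editor's sketch): for a 4-rank configuration in
 * READ_WRITE mode with rank = 1, the tables above give odt_mask_0 = 0x8
 * and odt_mask_1 = 0xA, so:
 *	cs_and_odt_mask = (0xFF & ~(1 << 1)) | (0x08 << 8) | (0x0A << 16)
 *			= 0xFD | 0x0800 | 0x0A0000 = 0x0A08FD
 */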

/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}

/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i);
	}
}

static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t write_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(uint32_t write_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
		    delay);
}

/* load up dqs config settings */
static void scc_mgr_load_dqs(uint32_t dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(uint32_t dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}

static void scc_mgr_set_dqs_en_phase_all_ranks(uint32_t read_group,
					       uint32_t phase)
{
	uint32_t r;
	uint32_t update_scan_chains;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/*
		 * USER although the h/w doesn't support different phases per
		 * shadow register, for simplicity our scc manager modeling
		 * keeps different phase settings per shadow reg, and it's
		 * important for us to keep them in sync to match h/w.
		 * For efficiency, the scan chain update should occur only
		 * once to sr0.
		 */
		update_scan_chains = (r == 0) ? 1 : 0;

		scc_mgr_set_dqs_en_phase(read_group, phase);

		if (update_scan_chains) {
			writel(read_group, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
						     uint32_t phase)
{
	uint32_t r;
	uint32_t update_scan_chains;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/*
		 * USER although the h/w doesn't support different phases per
		 * shadow register, for simplicity our scc manager modeling
		 * keeps different phase settings per shadow reg, and it's
		 * important for us to keep them in sync to match h/w.
		 * For efficiency, the scan chain update should occur only
		 * once to sr0.
		 */
		update_scan_chains = (r == 0) ? 1 : 0;

		scc_mgr_set_dqdqs_output_phase(write_group, phase);

		if (update_scan_chains) {
			writel(write_group, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
					       uint32_t delay)
{
	uint32_t r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set_dqs_en_delay(read_group, delay);

		writel(read_group, &sdr_scc_mgr->dqs_ena);
		/*
		 * In shadow register mode, the T11 settings are stored in
		 * registers in the core, which are updated by the DQS_ENA
		 * signals. Not issuing the SCC_MGR_UPD command allows us to
		 * save lots of rank switching overhead, by calling
		 * select_shadow_regs_for_update with update_scan_chains
		 * set to 0.
		 */
		writel(0, &sdr_scc_mgr->update);
	}
	/* Same rationale as above for the final update. */
	writel(0, &sdr_scc_mgr->update);
}

static void scc_mgr_set_oct_out1_delay(uint32_t write_group, uint32_t delay)
{
	uint32_t read_group;
	uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			SCC_MGR_OCT_OUT1_DELAY_OFFSET;

	/*
	 * Load the setting in the SCC manager.
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (read_group = write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH /
	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	     read_group < (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH /
	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH; ++read_group)
		writel(delay, addr + (read_group << 2));
}
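
/*
 * Example (editor's sketch, hypothetical widths): if
 * RW_MGR_MEM_IF_READ_DQS_WIDTH is twice RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
 * then write_group 1 spans read groups 2 and 3, and the loop above
 * programs the delay twice, once per read group.
 */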

static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- dqs bypass
	 * bits: 1:1 = 1'b1	- dq bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	uint32_t value = (0 << 8) | (0 << 7) | (1 << 5) | (1 << 2) |
			 (1 << 1) | (1 << 0);
	uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_HHP_GLOBALS_OFFSET;

	writel(value, addr + SCC_MGR_HHP_EXTRAS_OFFSET);
}
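
/*
 * For reference, the bit-field expression above evaluates to
 * 0x20 | 0x4 | 0x2 | 0x1 = 0x27.
 */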

/*
 * USER Zero all DQS config
 * TODO: maybe rename to scc_mgr_zero_dqs_config (or something)
 */
static void scc_mgr_zero_all(void)
{
	uint32_t i, r;

	/*
	 * USER Zero all DQS config settings, across all groups and all
	 * shadow registers
	 */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* av/cv don't have out2 */
			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
		}
	}

	/* multicast to all DQS group enables */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}

static void scc_set_bypass_mode(uint32_t write_group, uint32_t mode)
{
	/*
	 * mode = 0 : Do NOT bypass - Half Rate Mode
	 * mode = 1 : Bypass - Full Rate Mode
	 */

	/* only need to set once for all groups, pins, dq, dqs, dm */
	if (write_group == 0) {
		debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n", __func__,
			   __LINE__);
		scc_mgr_set_hhp_extras();
		debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
			   __func__, __LINE__);
	}

	/* multicast to all DQ enables */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* update current DQS IO enable */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* update the DQS logic */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* hit update */
	writel(0, &sdr_scc_mgr->update);
}

static void scc_mgr_load_dqs_for_write_group(uint32_t write_group)
{
	uint32_t read_group;
	uint32_t addr = (u32)&sdr_scc_mgr->dqs_ena;

	/*
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be scanned multiple times.
	 */
	for (read_group = write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH /
	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	     read_group < (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH /
	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH; ++read_group)
		writel(read_group, addr);
}

static void scc_mgr_zero_group(uint32_t write_group, uint32_t test_begin,
	int32_t out_only)
{
	uint32_t i, r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings */
		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* multicast to all DQ enables */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
			scc_mgr_set_dm_out1_delay(i, 0);

		/* multicast to all DM enables */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* zero all DQS io settings */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(write_group, 0);
		/* av/cv don't have out2 */
		scc_mgr_set_dqs_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_load_dqs_for_write_group(write_group);

		/* multicast to all DQS IO enables (only 1) */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* hit update to zero everything */
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(uint32_t write_group,
	uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}

/* apply and load a particular output delay for the DQ pins in a group */
static void scc_mgr_apply_group_dq_out1_delay(uint32_t write_group,
	uint32_t group_bgn, uint32_t delay1)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
		scc_mgr_set_dq_out1_delay(i, delay1);
		scc_mgr_load_dq(i);
	}
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t write_group,
	uint32_t delay1)
{
	uint32_t i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
	uint32_t delay)
{
	scc_mgr_set_dqs_out1_delay(write_group, delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(write_group, delay);
	scc_mgr_load_dqs_for_write_group(write_group);
}

/* apply a delay to the entire output side: DQ, DM, DQS, OCT */
static void scc_mgr_apply_group_all_out_delay_add(uint32_t write_group,
	uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p, new_delay;

	/* dq shift */
	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
		new_delay = READ_SCC_DQ_OUT2_DELAY;
		new_delay += delay;

		if (new_delay > IO_IO_OUT2_DELAY_MAX) {
			debug_cond(DLEVEL == 1,
				   "%s:%d (%u, %u, %u) DQ[%u,%u]: %u > %lu => %lu\n",
				   __func__, __LINE__,
				   write_group, group_bgn, delay, i, p,
				   new_delay,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX);
			new_delay = IO_IO_OUT2_DELAY_MAX;
		}

		scc_mgr_load_dq(i);
	}

	/* dm shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		new_delay = READ_SCC_DM_IO_OUT2_DELAY;
		new_delay += delay;

		if (new_delay > IO_IO_OUT2_DELAY_MAX) {
			debug_cond(DLEVEL == 1,
				   "%s:%d (%u, %u, %u) DM[%u]: %u > %lu => %lu\n",
				   __func__, __LINE__,
				   write_group, group_bgn, delay, i,
				   new_delay,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX);
			new_delay = IO_IO_OUT2_DELAY_MAX;
		}

		scc_mgr_load_dm(i);
	}

	/* dqs shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY;
	new_delay += delay;

	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u, %u) DQS: %u > %d => %d; adding %u to OUT1\n",
			   __func__, __LINE__,
			   write_group, group_bgn, delay, new_delay,
			   IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		scc_mgr_set_dqs_out1_delay(write_group, new_delay -
					   IO_IO_OUT2_DELAY_MAX);
		new_delay = IO_IO_OUT2_DELAY_MAX;
	}

	scc_mgr_load_dqs_io();

	/* oct shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY;
	new_delay += delay;

	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u, %u) OCT: %u > %d => %d; adding %u to OUT1\n",
			   __func__, __LINE__,
			   write_group, group_bgn, delay, new_delay,
			   IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		scc_mgr_set_oct_out1_delay(write_group, new_delay -
					   IO_IO_OUT2_DELAY_MAX);
		new_delay = IO_IO_OUT2_DELAY_MAX;
	}

	scc_mgr_load_dqs_for_write_group(write_group);
}
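
/*
 * Example (editor's sketch, hypothetical values): if the current OUT2
 * delay is 14, delay = 6 and IO_IO_OUT2_DELAY_MAX = 16, then
 * new_delay = 20 exceeds the maximum, so in the DQS/OCT cases above the
 * excess (20 - 16 = 4) is moved into OUT1 and OUT2 is clamped at 16.
 */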

/*
 * USER apply a delay to the entire output side (DQ, DM, DQS, OCT)
 * and to all ranks
 */
static void scc_mgr_apply_group_all_out_delay_add_all_ranks(
	uint32_t write_group, uint32_t group_bgn, uint32_t delay)
{
	uint32_t r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(write_group,
						      group_bgn, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * optimization used to recover some slots in ddr3 inst_rom
 * could be applied to other protocols if we wanted to
 */
static void set_jump_as_return(void)
{
	/*
	 * To save space, we replace return with a jump to a special shared
	 * RETURN instruction, and we set the counter to a large value so
	 * that we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/*
 * should always use constants as argument to ensure all computations are
 * performed at compile time
 */
static void delay_for_n_mem_clocks(const uint32_t clocks)
{
	uint32_t afi_clocks;
	uint8_t inner = 0;
	uint8_t outer = 0;
	uint16_t c_loop = 0;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* scale (rounding up) to get afi clocks */
	afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO;

	/*
	 * Note, we don't bother accounting for being off a little bit
	 * because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test before
	 * the decrement, and so always perform the loop
	 * 1 time more than the counter value.
	 */
	if (afi_clocks == 0) {
		;
	} else if (afi_clocks <= 0x100) {
		inner = afi_clocks - 1;
		outer = 0;
		c_loop = 0;
	} else if (afi_clocks <= 0x10000) {
		inner = 0xff;
		outer = (afi_clocks - 1) >> 8;
		c_loop = 0;
	} else {
		inner = 0xff;
		outer = 0xff;
		c_loop = (afi_clocks - 1) >> 16;
	}

	/*
	 * rom instructions are structured as follows:
	 *
	 *	IDLE_LOOP2: jnz cntr0, TARGET_A
	 *	IDLE_LOOP1: jnz cntr1, TARGET_B
	 *		    return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well.
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely.
	 *
	 * a little confusing, but it helps save precious space in the
	 * inst_rom and sequencer rom, keeps the delays more accurate,
	 * and reduces overhead
	 */
	if (afi_clocks <= 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_IDLE_LOOP1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
		writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
		       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* hack to get around compiler not being smart enough */
		if (afi_clocks <= 0x10000) {
			/* only need to run once */
			writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} else {
			do {
				writel(RW_MGR_IDLE_LOOP2,
				       SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
			} while (c_loop-- != 0);
		}
	}

	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}
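
/*
 * Worked example (editor's sketch, assuming AFI_RATE_RATIO == 2):
 * clocks = 512 gives afi_clocks = 256 <= 0x100, so only IDLE_LOOP1 runs
 * with inner = 255; per the note above, the loop body executes
 * inner + 1 = 256 times. clocks = 70000 gives afi_clocks = 35000, so
 * inner = 0xff, outer = (35000 - 1) >> 8 = 136, and IDLE_LOOP2 is used.
 */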

static void rw_mgr_mem_initialize(void)
{
	uint32_t r;
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcast to all ranks */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
	       RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how to load the registers for a loop:
	 * Counters are located @ 0x800,
	 * Jump addresses are located @ 0xC00.
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C.
	 * I know this ain't pretty, but the Avalon bus throws away the 2
	 * least significant bits.
	 */

	/* start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the numbers of iterations in 2 nested loops,
	 * it takes the following number of cycles to complete the operation:
	 *	number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop.
	 * One possible solution is n = 0, a = 256, b = 106 => a = FF,
	 * b = 6A.
	 */
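
	/*
	 * Check: ((2 + 0) * 256 + 2) * 106 = 54,484 cycles, i.e. ~204 us
	 * at 3.75 ns/cycle, which covers the 200 us requirement.
	 */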

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(RW_MGR_INIT_RESET_0_CKE_0, grpaddr);

	/* indicate that memory is stable */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	/*
	 * transition the RESET to high
	 * Wait for 500us
	 */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
	 * If a and b are the numbers of iterations in 2 nested loops,
	 * it takes the following number of cycles to complete the operation:
	 *	number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop.
	 * One possible solution is n = 2, a = 131, b = 256 => a = 83,
	 * b = FF.
	 */
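
	/*
	 * Check: ((2 + 2) * 131 + 2) * 256 = 134,656 cycles, i.e. ~505 us
	 * at 3.75 ns/cycle, which covers the 500 us requirement.
	 */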

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	writel(RW_MGR_INIT_RESET_1_CKE_0, grpaddr);

	/* bring up clock enable */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/*
		 * USER Use mirrored commands for odd ranks if address
		 * mirroring is on
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_DLL_RESET_MIRR, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			set_jump_as_return();
			writel(RW_MGR_MRS0_DLL_RESET, grpaddr);
		}

		set_jump_as_return();
		writel(RW_MGR_ZQCL, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}

/*
 * At the end of calibration we have to program the user settings in
 * and hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	uint32_t r;
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	debug("%s:%d\n", __func__, __LINE__);

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;
		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		writel(RW_MGR_PRECHARGE_ALL, grpaddr);

		/* load up MR settings specified by user */

		/*
		 * Use mirrored commands for odd ranks if address
		 * mirroring is on
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_USER_MIRR, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_USER, grpaddr);
		}
		/*
		 * USER need to wait tMOD (12CK or 15ns) time before issuing
		 * other commands, but we will have plenty of NIOS cycles
		 * before actual handoff, so it's okay.
		 */
	}
}

/*
 * performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works
 */
static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
	uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
	uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst of read commands */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_GUARANTEED_READ,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_GUARANTEED_READ_CONT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		tmp_bit_chk = 0;
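		/*
		 * Editor's note: per virtual group, the loop below shifts
		 * tmp_bit_chk left and ORs in the per-bit pass flags,
		 * correct_mask_vg & ~base_rw_mgr (the RW manager status
		 * appears to report 1 = fail, hence the inversion).
		 * *bit_chk then ANDs across ranks, so a bit stays set only
		 * if it passed on every rank tested.
		 */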
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk <<
				(RW_MGR_MEM_DQ_PER_READ_DQS /
				 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
			writel(RW_MGR_GUARANTEED_READ, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk = tmp_bit_chk |
				(correct_mask_vg & ~base_rw_mgr);

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
	debug_cond(DLEVEL == 1,
		   "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %lu\n",
		   __func__, __LINE__, group, *bit_chk,
		   param->read_correct_mask,
		   (long unsigned int)(*bit_chk == param->read_correct_mask));
	return *bit_chk == param->read_correct_mask;
}

static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks(
	uint32_t group, uint32_t num_tries, uint32_t *bit_chk)
{
	return rw_mgr_mem_calibrate_read_test_patterns(0, group,
		num_tries, bit_chk, 1);
}

/* load up the patterns we are going to use during a read test */
static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn,
	uint32_t all_ranks)
{
	uint32_t r;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);

	debug("%s:%d\n", __func__, __LINE__);

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);
		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
		       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}
  1010. /*
  1011. * try a read and see if it returns correct data back. has dummy reads
  1012. * inserted into the mix used to align dqs enable. has more thorough checks
  1013. * than the regular read test.
  1014. */
  1015. static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups, uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;
	uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) &
		CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_READ_B2B_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(RW_MGR_READ_B2B_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		if (quick_read_mode)
			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
			/* need at least two (1+1) reads to capture failures */
		else if (all_groups)
			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
		else
			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
		if (all_groups)
			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);
		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) {
			/* reset the FIFOs to get pointers to a known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			if (all_groups)
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
			else
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;

			writel(RW_MGR_READ_B2B, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ALL,%u) =>\
			   (%u == %u) => %lu", __func__, __LINE__, group,
			   all_groups, *bit_chk, param->read_correct_mask,
			   (long unsigned int)(*bit_chk ==
			   param->read_correct_mask));
		return *bit_chk == param->read_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ONE,%u) =>\
			   (%u != %lu) => %lu\n", __func__, __LINE__,
			   group, all_groups, *bit_chk, (long unsigned int)0,
			   (long unsigned int)(*bit_chk != 0x00));
		return *bit_chk != 0x00;
	}
}
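
/*
 * A note on the bit_chk bookkeeping above: each virtual group reports a
 * pass/fail bit per DQ pin, so tmp_bit_chk is shifted left by
 * RW_MGR_MEM_DQ_PER_READ_DQS / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS
 * bits before the next group's result is OR-ed in. With, say, 8 DQ per
 * DQS and 2 virtual groups (illustrative values), each iteration shifts
 * by 4 and the final tmp_bit_chk carries one pass bit per DQ. *bit_chk
 * is then AND-ed across ranks, so a bit survives only if that DQ passed
 * on every tested rank.
 */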

static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups)
{
	return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
					      bit_chk, all_groups, 1);
}
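
/*
 * VFIFO pointer helpers. The PHY command interface used here only
 * exposes an increment (inc_vfifo_hard_phy), so a single decrement is
 * emulated by wrapping all the way around with VFIFO_SIZE - 1
 * increments; *v tracks the resulting pointer position for the callers.
 */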

static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
	(*v)++;
}

static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v)
{
	uint32_t i;

	for (i = 0; i < VFIFO_SIZE-1; i++)
		rw_mgr_incr_vfifo(grp, v);
}
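
/*
 * find_vfifo_read() pushes the VFIFO until a failing read is observed,
 * giving the DQS-enable search below a known-bad starting point. The
 * loop stops at the second observed failure rather than the first,
 * presumably to skip past a marginal position; if no failure is seen
 * within VFIFO_SIZE steps, something is wrong and 0 is returned.
 */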

static int find_vfifo_read(uint32_t grp, uint32_t *bit_chk)
{
	uint32_t v;
	uint32_t fail_cnt = 0;
	uint32_t test_status;

	for (v = 0; v < VFIFO_SIZE; ) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %u\n",
			   __func__, __LINE__, v);
		test_status = rw_mgr_mem_calibrate_read_test_all_ranks
			(grp, 1, PASS_ONE_BIT, bit_chk, 0);
		if (!test_status) {
			fail_cnt++;

			if (fail_cnt == 2)
				break;
		}

		/* fiddle with FIFO */
		rw_mgr_incr_vfifo(grp, &v);
	}

	if (v >= VFIFO_SIZE) {
		/* no failing read found!! Something must have gone wrong */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n",
			   __func__, __LINE__);
		return 0;
	} else {
		return v;
	}
}
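
/*
 * find_working_phase() sweeps DQS-enable delay taps (d), VFIFO
 * positions and phase taps (p) until the read test first passes. The
 * running estimate of the working-window start follows
 *
 *	work_bgn = d * IO_DELAY_PER_DQS_EN_DCHAIN_TAP
 *		 + p * IO_DELAY_PER_OPA_TAP
 *
 * For example, with illustrative tap sizes of 25 ps per dtap and
 * 312 ps per ptap, a first pass at d = 2, p = 3 would give
 * work_bgn = 2 * 25 + 3 * 312 = 986 ps.
 */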

static int find_working_phase(uint32_t *grp, uint32_t *bit_chk,
			      uint32_t dtaps_per_ptap, uint32_t *work_bgn,
			      uint32_t *v, uint32_t *d, uint32_t *p,
			      uint32_t *i, uint32_t *max_working_cnt)
{
	uint32_t found_begin = 0;
	uint32_t tmp_delay = 0;
	uint32_t test_status;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++, tmp_delay +=
		IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		*work_bgn = tmp_delay;
		scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

		for (*i = 0; *i < VFIFO_SIZE; (*i)++) {
			for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_bgn +=
				IO_DELAY_PER_OPA_TAP) {
				scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

				test_status =
				rw_mgr_mem_calibrate_read_test_all_ranks
				(*grp, 1, PASS_ONE_BIT, bit_chk, 0);

				if (test_status) {
					*max_working_cnt = 1;
					found_begin = 1;
					break;
				}
			}

			if (found_begin)
				break;

			if (*p > IO_DQS_EN_PHASE_MAX)
				/* fiddle with FIFO */
				rw_mgr_incr_vfifo(*grp, v);
		}

		if (found_begin)
			break;
	}

	if (*i >= VFIFO_SIZE) {
		/* cannot find working solution */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/\
			   ptap/dtap\n", __func__, __LINE__);
		return 0;
	} else {
		return 1;
	}
}
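
/*
 * sdr_backup_phase() refines the window start found above: it backs up
 * one phase tap (wrapping the VFIFO if p was already 0) and then walks
 * delay taps forward, looking for a passing read at a delay smaller
 * than the ptap-granular work_bgn. If one is found, work_bgn is pulled
 * back to that finer-grained value before the phase and VFIFO state
 * are restored.
 */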

static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk,
			     uint32_t *work_bgn, uint32_t *v, uint32_t *d,
			     uint32_t *p, uint32_t *max_working_cnt)
{
	uint32_t found_begin = 0;
	uint32_t tmp_delay;

	/* Special case code for backing up a phase */
	if (*p == 0) {
		*p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(*grp, v);
	} else {
		(*p)--;
	}
	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
	scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

	for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn;
	     (*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

		if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
							     PASS_ONE_BIT,
							     bit_chk, 0)) {
			found_begin = 1;
			*work_bgn = tmp_delay;
			break;
		}
	}

	/* We have found a working dtap before the ptap found above */
	if (found_begin == 1)
		(*max_working_cnt)++;

	/*
	 * Restore VFIFO to old state before we decremented it
	 * (if needed).
	 */
	(*p)++;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		*p = 0;
		rw_mgr_incr_vfifo(*grp, v);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(*grp, 0);
}
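
/*
 * sdr_nonworking_phase() continues from the working phase found above
 * and increments in ptaps (wrapping the VFIFO as needed) until the read
 * test fails again, which marks the far edge of the working window in
 * work_end. max_working_cnt counts the ptap positions that passed along
 * the way.
 */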

static int sdr_nonworking_phase(uint32_t *grp, uint32_t *bit_chk,
				uint32_t *work_bgn, uint32_t *v, uint32_t *d,
				uint32_t *p, uint32_t *i, uint32_t *max_working_cnt,
				uint32_t *work_end)
{
	uint32_t found_end = 0;

	(*p)++;
	*work_end += IO_DELAY_PER_OPA_TAP;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		/* fiddle with FIFO */
		*p = 0;
		rw_mgr_incr_vfifo(*grp, v);
	}

	for (; *i < VFIFO_SIZE + 1; (*i)++) {
		for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_end
			+= IO_DELAY_PER_OPA_TAP) {
			scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

			if (!rw_mgr_mem_calibrate_read_test_all_ranks
				(*grp, 1, PASS_ONE_BIT, bit_chk, 0)) {
				found_end = 1;
				break;
			} else {
				(*max_working_cnt)++;
			}
		}

		if (found_end)
			break;

		if (*p > IO_DQS_EN_PHASE_MAX) {
			/* fiddle with FIFO */
			rw_mgr_incr_vfifo(*grp, v);
			*p = 0;
		}
	}

	if (*i >= VFIFO_SIZE + 1) {
		/* cannot see edge of failing read */
		debug_cond(DLEVEL == 2, "%s:%d sdr_nonworking_phase: end:\
			   failed\n", __func__, __LINE__);
		return 0;
	} else {
		return 1;
	}
}
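
/*
 * sdr_find_window_centre() places DQS enable in the middle of the
 * window found above: it folds work_mid into one VFIFO cycle, converts
 * the remainder into whole ptaps plus leftover dtaps, then nudges the
 * VFIFO until the read test passes. As an illustrative walk-through
 * (made-up numbers): work_bgn = 100, work_end = 500 gives
 * work_mid = 300; with 8 ptaps of 90 each, one VFIFO cycle is 720, so
 * work_mid stays 300, which decomposes into 3 ptaps (270) plus enough
 * dtaps to cover the remaining 30.
 */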

static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk,
				  uint32_t *work_bgn, uint32_t *v, uint32_t *d,
				  uint32_t *p, uint32_t *work_mid,
				  uint32_t *work_end)
{
	int i;
	int tmp_delay = 0;

	*work_mid = (*work_bgn + *work_end) / 2;

	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
		   *work_bgn, *work_end, *work_mid);
	/* Get the middle delay to be less than a VFIFO delay */
	for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX;
	     (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
		;
	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
	while (*work_mid > tmp_delay)
		*work_mid -= tmp_delay;
	debug_cond(DLEVEL == 2, "new work_mid %d\n", *work_mid);

	tmp_delay = 0;
	for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX && tmp_delay < *work_mid;
	     (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
		;
	tmp_delay -= IO_DELAY_PER_OPA_TAP;
	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", (*p) - 1, tmp_delay);
	for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_mid; (*d)++,
		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP)
		;
	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", *d, tmp_delay);

	scc_mgr_set_dqs_en_phase_all_ranks(*grp, (*p) - 1);
	scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

	/*
	 * push vfifo until we can successfully calibrate. We can do this
	 * because the largest possible margin is 1 VFIFO cycle.
	 */
	for (i = 0; i < VFIFO_SIZE; i++) {
		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%u\n",
			   *v);
		if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
							     PASS_ONE_BIT,
							     bit_chk, 0)) {
			break;
		}

		/* fiddle with FIFO */
		rw_mgr_incr_vfifo(*grp, v);
	}

	if (i >= VFIFO_SIZE) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: \
			   failed\n", __func__, __LINE__);
		return 0;
	} else {
		return 1;
	}
}
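
/*
 * Overview of the DQS enable phase search below, step by step:
 *   0. estimate dtaps per ptap from the static tap delays;
 *   1. push the VFIFO until a failing read is found;
 *   2. advance in ptaps to the first working phase (work_bgn);
 *   3a/4a/5a. if the window covers a whole ptap, back off one phase,
 *      advance to the first non-working phase (work_end), then trim
 *      back by one ptap;
 *   3-5b. otherwise find the right edge directly with dtaps;
 *   then re-measure dtaps-per-ptap dynamically for tracking, and
 *   6. centre DQS enable within [work_bgn, work_end].
 */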

/* Find a good DQS enable to use */
static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
{
	uint32_t v, d, p, i;
	uint32_t max_working_cnt;
	uint32_t bit_chk;
	uint32_t dtaps_per_ptap;
	uint32_t work_bgn, work_mid, work_end;
	uint32_t found_passing_read, found_failing_read, initial_failing_dtap;

	debug("%s:%d %u\n", __func__, __LINE__, grp);

	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);

	/* ************************************************************** */
	/* * Step 0 : Determine number of delay taps for each phase tap * */
	dtaps_per_ptap = IO_DELAY_PER_OPA_TAP/IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	/* ********************************************************* */
	/* * Step 1 : First push vfifo until we get a failing read * */
	v = find_vfifo_read(grp, &bit_chk);

	max_working_cnt = 0;

	/* ******************************************************** */
	/* * step 2: find first working phase, increment in ptaps * */
	work_bgn = 0;
	if (find_working_phase(&grp, &bit_chk, dtaps_per_ptap, &work_bgn, &v, &d,
			       &p, &i, &max_working_cnt) == 0)
		return 0;

	work_end = work_bgn;

	/*
	 * If d is 0 then the working window covers a phase tap and we can
	 * follow the old procedure; otherwise, we've found the beginning
	 * and we need to increment the dtaps until we find the end.
	 */
	if (d == 0) {
		/* ********************************************************* */
		/* * step 3a: if we have room, back off by one and
		increment in dtaps * */
		sdr_backup_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
				 &max_working_cnt);

		/* ********************************************************* */
		/* * step 4a: go forward from working phase to non working
		phase, increment in ptaps * */
		if (sdr_nonworking_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
					 &i, &max_working_cnt, &work_end) == 0)
			return 0;

		/* ********************************************************* */
		/* * step 5a: back off one from last, increment in dtaps * */

		/* Special case code for backing up a phase */
		if (p == 0) {
			p = IO_DQS_EN_PHASE_MAX;
			rw_mgr_decr_vfifo(grp, &v);
		} else {
			p = p - 1;
		}

		work_end -= IO_DELAY_PER_OPA_TAP;
		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

		/* * The actual increment of dtaps is done outside of
		the if/else block to share code * */
		d = 0;

		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: \
			   vfifo=%u ptap=%u\n", __func__, __LINE__,
			   v, p);
	} else {
		/* ******************************************************* */
		/* * step 3-5b: Find the right edge of the window using
		delay taps * */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase:vfifo=%u \
			   ptap=%u dtap=%u bgn=%u\n", __func__, __LINE__,
			   v, p, d, work_bgn);

		work_end = work_bgn;

		/* * The actual increment of dtaps is done outside of the
		if/else block to share code * */

		/* Only here to counterbalance a subtract later on which is
		not needed if this branch of the algorithm is taken */
		max_working_cnt++;
	}

	/* The dtap increment to find the failing edge is done here */
	for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end +=
		IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
			   end-2: dtap=%u\n", __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							      PASS_ONE_BIT,
							      &bit_chk, 0)) {
			break;
		}
	}

	/* Go back to working dtap */
	if (d != 0)
		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%u \
		   ptap=%u dtap=%u end=%u\n", __func__, __LINE__,
		   v, p, d-1, work_end);

	if (work_end < work_bgn) {
		/* nil range */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: \
			   failed\n", __func__, __LINE__);
		return 0;
	}

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n",
		   __func__, __LINE__, work_bgn, work_end);

	/* *************************************************************** */
	/*
	 * * We need to calculate the number of dtaps that equal a ptap
	 * * To do that we'll back up a ptap and re-find the edge of the
	 * * window using dtaps
	 */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap \
		   for tracking\n", __func__, __LINE__);

	/* Special case code for backing up a phase */
	if (p == 0) {
		p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp, &v);
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
			   cycle/phase: v=%u p=%u\n", __func__, __LINE__,
			   v, p);
	} else {
		p = p - 1;
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
			   phase only: v=%u p=%u", __func__, __LINE__,
			   v, p);
	}

	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

	/*
	 * Increase dtap until we first see a passing read (in case the
	 * window is smaller than a ptap),
	 * and then a failing read to mark the edge of the window again
	 */

	/* Find a passing read */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n",
		   __func__, __LINE__);
	found_passing_read = 0;
	found_failing_read = 0;
	initial_failing_dtap = d;
	for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing \
			   read d=%u\n", __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							     PASS_ONE_BIT,
							     &bit_chk, 0)) {
			found_passing_read = 1;
			break;
		}
	}

	if (found_passing_read) {
		/* Find a failing read */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing \
			   read\n", __func__, __LINE__);
		for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
			debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
				   testing read d=%u\n", __func__, __LINE__, d);
			scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

			if (!rw_mgr_mem_calibrate_read_test_all_ranks
				(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
				found_failing_read = 1;
				break;
			}
		}
	} else {
		debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to \
			   calculate dtaps", __func__, __LINE__);
		debug_cond(DLEVEL == 1, "per ptap. Fall back on static value\n");
	}

	/*
	 * The dynamically calculated dtaps_per_ptap is only valid if we
	 * found both a passing and a failing read. If we didn't, it means
	 * d hit the max (IO_DQS_EN_DELAY_MAX) and dtaps_per_ptap retains
	 * its statically calculated value.
	 */
	if (found_passing_read && found_failing_read)
		dtaps_per_ptap = d - initial_failing_dtap;

	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u \
		   - %u = %u", __func__, __LINE__, d,
		   initial_failing_dtap, dtaps_per_ptap);

	/* ******************************************** */
	/* * step 6: Find the centre of the window * */
	if (sdr_find_window_centre(&grp, &bit_chk, &work_bgn, &v, &d, &p,
				   &work_mid, &work_end) == 0)
		return 0;

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: \
		   vfifo=%u ptap=%u dtap=%u\n", __func__, __LINE__,
		   v, p-1, d);
	return 1;
}
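
/*
 * The sweep below pre-skews the DQ input delays so that the DQS enable
 * search sees a spread of DQ arrival times; delay_step spaces the bits
 * evenly across the available delay chain. For example, assuming
 * IO_IO_IN_DELAY_MAX = 31 and 8 DQ per read DQS (illustrative values),
 * delay_step = 31 / 7 = 4, so the bits get delays 0, 4, 8, ... 28. The
 * delays are reset to zero again once the search has run.
 */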

/*
 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
 * dq_in_delay values
 */
static uint32_t
rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
(uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
{
	uint32_t found;
	uint32_t i;
	uint32_t p;
	uint32_t d;
	uint32_t r;

	const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
		(RW_MGR_MEM_DQ_PER_READ_DQS-1);
		/* we start at zero, so have one less dq to divide among */

	debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group,
	      test_bgn);

	/* try different dq_in_delays since the dq path is shorter than dqs */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++, d += delay_step) {
			debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_\
				   vfifo_find_dqs_", __func__, __LINE__);
			debug_cond(DLEVEL == 1, "en_phase_sweep_dq_in_delay: g=%u/%u ",
				   write_group, read_group);
			debug_cond(DLEVEL == 1, "r=%u, i=%u p=%u d=%u\n", r, i, p, d);
			scc_mgr_set_dq_in_delay(p, d);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);

	debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_\
		   en_phase_sweep_dq", __func__, __LINE__);
	debug_cond(DLEVEL == 1, "_in_delay: g=%u/%u found=%u; Resetting delay \
		   chain to zero\n", write_group, read_group, found);

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++) {
			scc_mgr_set_dq_in_delay(p, 0);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	return found;
}
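
/*
 * A note on the edge encoding used by the centering code below: a
 * left_edge[i]/right_edge[i] value of IO_IO_IN_DELAY_MAX + 1 means "not
 * found yet", a non-negative value is a tap position, and a negative
 * value -(d + 1) records that the bit was already failing at delay d
 * before any pass was seen, i.e. the edge lies on the other side of
 * delay 0. left_edge[i] - right_edge[i] then gives a signed per-bit
 * offset; the scan for mid_min below keeps the smallest one and, per
 * the comment there, -mid_min/2 is the amount DQS is moved.
 */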

/* per-bit deskew DQ and center */
static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
	uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
	uint32_t use_read_test, uint32_t update_fom)
{
	uint32_t i, p, d, min_index;
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	uint32_t bit_chk;
	uint32_t sticky_bit_chk;
	int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t mid;
	int32_t orig_mid_min, mid_min;
	int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
		final_dqs_en;
	int32_t dq_margin, dqs_margin;
	uint32_t stop;
	uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
	uint32_t addr;

	debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);

	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;
	start_dqs = readl(addr + (read_group << 2));
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
		start_dqs_en = readl(addr + ((read_group << 2)
				     - IO_DQS_EN_DELAY_OFFSET));

	/* set the left and right edge of each bit to an illegal value */
	/* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
	sticky_bit_chk = 0;
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		left_edge[i]  = IO_IO_IN_DELAY_MAX + 1;
		right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
	}

	/* Search for the left edge of the window for each bit */
	for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
		scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (use_read_test) {
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
				&bit_chk, 0, 0);
		} else {
			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
							0, PASS_ONE_BIT,
							&bit_chk, 0);
			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (bit_chk == 0);
		}
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->read_correct_mask);
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => %u == %u \
			   && %u", __func__, __LINE__, d,
			   sticky_bit_chk,
			   param->read_correct_mask, stop);

		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
				if (bit_chk & 1) {
					/* Remember a passing test as the
					left_edge */
					left_edge[i] = d;
				} else {
					/* If a left edge has not been seen yet,
					then a future passing test will mark
					this edge as the right edge */
					if (left_edge[i] ==
						IO_IO_IN_DELAY_MAX + 1) {
						right_edge[i] = -(d + 1);
					}
				}
				bit_chk = bit_chk >> 1;
			}
		}
	}

	/* Reset DQ delay chains to 0 */
	scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, 0);
	sticky_bit_chk = 0;
	for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
			   %d right_edge[%u]: %d\n", __func__, __LINE__,
			   i, left_edge[i], i, right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) && (
			right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
			right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
			debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset \
				   right_edge[%u]: %d\n", __func__, __LINE__,
				   i, right_edge[i]);
		}

		/*
		 * Reset sticky bit (except for bits where we have seen
		 * both the left and right edge).
		 */
		sticky_bit_chk = sticky_bit_chk << 1;
		if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) &&
		    (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
			sticky_bit_chk = sticky_bit_chk | 1;
		}

		if (i == 0)
			break;
	}

	/* Search for the right edge of the window for each bit */
	for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) {
		scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
		if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
			uint32_t delay = d + start_dqs_en;
			if (delay > IO_DQS_EN_DELAY_MAX)
				delay = IO_DQS_EN_DELAY_MAX;
			scc_mgr_set_dqs_en_delay(read_group, delay);
		}
		scc_mgr_load_dqs(read_group);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (use_read_test) {
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
				&bit_chk, 0, 0);
		} else {
			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
							0, PASS_ONE_BIT,
							&bit_chk, 0);
			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (bit_chk == 0);
		}
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->read_correct_mask);

		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => %u == \
			   %u && %u", __func__, __LINE__, d,
			   sticky_bit_chk, param->read_correct_mask, stop);

		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
				if (bit_chk & 1) {
					/* Remember a passing test as
					the right_edge */
					right_edge[i] = d;
				} else {
					if (d != 0) {
						/* If a right edge has not been
						seen yet, then a future passing
						test will mark this edge as the
						left edge */
						if (right_edge[i] ==
						IO_IO_IN_DELAY_MAX + 1) {
							left_edge[i] = -(d + 1);
						}
					} else {
						/* d = 0 failed, but it passed
						when testing the left edge,
						so it must be marginal,
						set it to -1 */
						if (right_edge[i] ==
							IO_IO_IN_DELAY_MAX + 1 &&
							left_edge[i] !=
							IO_IO_IN_DELAY_MAX
							+ 1) {
							right_edge[i] = -1;
						}
						/* If a right edge has not been
						seen yet, then a future passing
						test will mark this edge as the
						left edge */
						else if (right_edge[i] ==
							IO_IO_IN_DELAY_MAX +
							1) {
							left_edge[i] = -(d + 1);
						}
					}
				}

				debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,\
					   d=%u]: ", __func__, __LINE__, d);
				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ",
					   (int)(bit_chk & 1), i, left_edge[i]);
				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
					   right_edge[i]);
				bit_chk = bit_chk >> 1;
			}
		}
	}

	/* Check that all bits have a window */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
			   %d right_edge[%u]: %d", __func__, __LINE__,
			   i, left_edge[i], i, right_edge[i]);
		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) || (right_edge[i]
			== IO_IO_IN_DELAY_MAX + 1)) {
			/*
			 * Restore delay chain settings before letting the loop
			 * in rw_mgr_mem_calibrate_vfifo retry different
			 * dqs/ck relationships.
			 */
			scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
			if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
				scc_mgr_set_dqs_en_delay(read_group,
							 start_dqs_en);
			}
			scc_mgr_load_dqs(read_group);
			writel(0, &sdr_scc_mgr->update);

			debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to \
				   find edge [%u]: %d %d", __func__, __LINE__,
				   i, left_edge[i], right_edge[i]);
			if (use_read_test) {
				set_failing_group_stage(read_group *
					RW_MGR_MEM_DQ_PER_READ_DQS + i,
					CAL_STAGE_VFIFO,
					CAL_SUBSTAGE_VFIFO_CENTER);
			} else {
				set_failing_group_stage(read_group *
					RW_MGR_MEM_DQ_PER_READ_DQS + i,
					CAL_STAGE_VFIFO_AFTER_WRITES,
					CAL_SUBSTAGE_VFIFO_CENTER);
			}
			return 0;
		}
	}

	/* Find middle of window for each DQ bit */
	mid_min = left_edge[0] - right_edge[0];
	min_index = 0;
	for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		mid = left_edge[i] - right_edge[i];
		if (mid < mid_min) {
			mid_min = mid;
			min_index = i;
		}
	}

	/*
	 * -mid_min/2 represents the amount that we need to move DQS.
	 * If mid_min is odd and positive we'll need to add one to
	 * make sure the rounding in further calculations is correct
	 * (always bias to the right), so just add 1 for all positive values.
	 */
	if (mid_min > 0)
		mid_min++;

	mid_min = mid_min / 2;

	debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
		   __func__, __LINE__, mid_min, min_index);

	/* Determine the amount we can change DQS (which is -mid_min) */
	orig_mid_min = mid_min;
	new_dqs = start_dqs - mid_min;
	if (new_dqs > IO_DQS_IN_DELAY_MAX)
		new_dqs = IO_DQS_IN_DELAY_MAX;
	else if (new_dqs < 0)
		new_dqs = 0;

	mid_min = start_dqs - new_dqs;
	debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
		   mid_min, new_dqs);

	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
		if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
			mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
		else if (start_dqs_en - mid_min < 0)
			mid_min += start_dqs_en - mid_min;
	}
	new_dqs = start_dqs - mid_min;

	debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d \
		   new_dqs=%d mid_min=%d\n", start_dqs,
		   IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
		   new_dqs, mid_min);

	/* Initialize data for export structures */
	dqs_margin = IO_IO_IN_DELAY_MAX + 1;
	dq_margin  = IO_IO_IN_DELAY_MAX + 1;

	/* add delay to bring centre of all DQ windows to the same "level" */
	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		/* Use values before divide by 2 to reduce round off error */
		shift_dq = (left_edge[i] - right_edge[i] -
			(left_edge[min_index] - right_edge[min_index]))/2 +
			(orig_mid_min - mid_min);

		debug_cond(DLEVEL == 2, "vfifo_center: before: \
			   shift_dq[%u]=%d\n", i, shift_dq);

		addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET;
		temp_dq_in_delay1 = readl(addr + (p << 2));
		temp_dq_in_delay2 = readl(addr + (i << 2));

		if (shift_dq + (int32_t)temp_dq_in_delay1 >
			(int32_t)IO_IO_IN_DELAY_MAX) {
			shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay2;
		} else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) {
			shift_dq = -(int32_t)temp_dq_in_delay1;
		}
		debug_cond(DLEVEL == 2, "vfifo_center: after: \
			   shift_dq[%u]=%d\n", i, shift_dq);
		final_dq[i] = temp_dq_in_delay1 + shift_dq;
		scc_mgr_set_dq_in_delay(p, final_dq[i]);
		scc_mgr_load_dq(p);

		debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i,
			   left_edge[i] - shift_dq + (-mid_min),
			   right_edge[i] + shift_dq - (-mid_min));
		/* To determine values for export structures */
		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
			dq_margin = left_edge[i] - shift_dq + (-mid_min);

		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
	}

	final_dqs = new_dqs;
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
		final_dqs_en = start_dqs_en - mid_min;

	/* Move DQS-en */
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
		scc_mgr_set_dqs_en_delay(read_group, final_dqs_en);
		scc_mgr_load_dqs(read_group);
	}

	/* Move DQS */
	scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs);
	scc_mgr_load_dqs(read_group);
	debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d \
		   dqs_margin=%d", __func__, __LINE__,
		   dq_margin, dqs_margin);

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied. Apply the update bit.
	 */
	writel(0, &sdr_scc_mgr->update);

	return (dq_margin >= 0) && (dqs_margin >= 0);
}

/*
 * Calibrate the read valid prediction FIFO.
 *
 * - read valid prediction will consist of finding a good DQS enable phase,
 * DQS enable delay, DQS input phase, and DQS input delay.
 * - we also do a per-bit deskew on the DQ lines.
 */
static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group,
	uint32_t test_bgn)
{
	uint32_t p, d, rank_bgn, sr;
	uint32_t dtaps_per_ptap;
	uint32_t tmp_delay;
	uint32_t bit_chk;
	uint32_t grp_calibrated;
	uint32_t write_group, write_test_bgn;
	uint32_t failed_substage;

	debug("%s:%d: %u %u\n", __func__, __LINE__, read_group, test_bgn);

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_VFIFO);

	write_group = read_group;
	write_test_bgn = test_bgn;

	/* USER Determine number of delay taps for each phase tap */
	dtaps_per_ptap = 0;
	tmp_delay = 0;
	while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
		dtaps_per_ptap++;
		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}
	dtaps_per_ptap--;
	tmp_delay = 0;

	/* update info for sims */
	reg_file_set_group(read_group);

	grp_calibrated = 0;

	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;

	for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) {
		/*
		 * In RLDRAMX we may be messing with the delay of pins in
		 * the same write group but outside of the current read
		 * group, but that's OK because we haven't calibrated the
		 * output side yet.
		 */
		if (d > 0) {
			scc_mgr_apply_group_all_out_delay_add_all_ranks
			(write_group, write_test_bgn, d);
		}

		for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0;
		     p++) {
			/* set a particular dqdqs phase */
			scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p);

			debug_cond(DLEVEL == 1, "%s:%d calibrate_vfifo: g=%u \
				   p=%u d=%u\n", __func__, __LINE__,
				   read_group, p, d);

			/*
			 * Load up the patterns used by read calibration
			 * using current DQDQS phase.
			 */
			rw_mgr_mem_calibrate_read_load_patterns(0, 1);
			if (!(gbl->phy_debug_mode_flags &
				PHY_DEBUG_DISABLE_GUARANTEED_READ)) {
				if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks
				    (read_group, 1, &bit_chk)) {
					debug_cond(DLEVEL == 1, "%s:%d Guaranteed read test failed:",
						   __func__, __LINE__);
					debug_cond(DLEVEL == 1, " g=%u p=%u d=%u\n",
						   read_group, p, d);
					break;
				}
			}

			/* case:56390 */
			grp_calibrated = 1;
			if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
			    (write_group, read_group, test_bgn)) {
				/*
				 * USER Read per-bit deskew can be done on a
				 * per shadow register basis.
				 */
				for (rank_bgn = 0, sr = 0;
					rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
					rank_bgn += NUM_RANKS_PER_SHADOW_REG,
					++sr) {
					/*
					 * Determine if this set of ranks
					 * should be skipped entirely.
					 */
					if (!param->skip_shadow_regs[sr]) {
						/*
						 * If doing read after write
						 * calibration, do not update
						 * FOM, now - do it then.
						 */
						if (!rw_mgr_mem_calibrate_vfifo_center
						    (rank_bgn, write_group,
						     read_group, test_bgn, 1, 0)) {
							grp_calibrated = 0;
							failed_substage =
							CAL_SUBSTAGE_VFIFO_CENTER;
						}
					}
				}
			} else {
				grp_calibrated = 0;
				failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
			}
		}
	}

	if (grp_calibrated == 0) {
		set_failing_group_stage(write_group, CAL_STAGE_VFIFO,
					failed_substage);
		return 0;
	}

	/*
	 * Reset the delay chains back to zero if they have moved > 1
	 * (check for > 2 because the loop increments d by 2 even on the
	 * iteration that passes).
	 */
	if (d > 2)
		scc_mgr_zero_group(write_group, write_test_bgn, 1);

	return 1;
}

/* VFIFO Calibration -- Read Deskew Calibration after write deskew */
static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
	uint32_t test_bgn)
{
	uint32_t rank_bgn, sr;
	uint32_t grp_calibrated;
	uint32_t write_group;

	debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	write_group = read_group;

	/* update info for sims */
	reg_file_set_group(read_group);

	grp_calibrated = 1;
	/* Read per-bit deskew can be done on a per shadow register basis */
	for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
		/* Determine if this set of ranks should be skipped entirely */
		if (!param->skip_shadow_regs[sr]) {
			/* This is the last calibration round, update FOM here */
			if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
								write_group,
								read_group,
								test_bgn, 0,
								1)) {
				grp_calibrated = 0;
			}
		}
	}

	if (grp_calibrated == 0) {
		set_failing_group_stage(write_group,
					CAL_STAGE_VFIFO_AFTER_WRITES,
					CAL_SUBSTAGE_VFIFO_CENTER);
		return 0;
	}

	return 1;
}
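
/*
 * The LFIFO stage below does a simple linear search: starting from the
 * current (conservative) read latency, it keeps decrementing
 * gbl->curr_read_lat while full-pattern reads still pass on all ranks,
 * then backs off by adding a fudge factor of 2 to the last latency
 * tried, so the final value sits safely above the failure point.
 */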

/* Calibrate LFIFO to find smallest read latency */
static uint32_t rw_mgr_mem_calibrate_lfifo(void)
{
	uint32_t found_one;
	uint32_t bit_chk;

	debug("%s:%d\n", __func__, __LINE__);

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_LFIFO);
	reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);

	/* Load up the patterns used by read calibration for all ranks */
	rw_mgr_mem_calibrate_read_load_patterns(0, 1);
	found_one = 0;

	do {
		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
			   __func__, __LINE__, gbl->curr_read_lat);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
							      NUM_READ_TESTS,
							      PASS_ALL_BITS,
							      &bit_chk, 1)) {
			break;
		}

		found_one = 1;
		/*
		 * reduce read latency and see if things are
		 * working correctly
		 */
		gbl->curr_read_lat--;
	} while (gbl->curr_read_lat > 0);

	/* reset the fifos to get pointers to known state */
	writel(0, &phy_mgr_cmd->fifo_reset);

	if (found_one) {
		/* add a fudge factor to the read latency that was determined */
		gbl->curr_read_lat += 2;
		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \
			   read_lat=%u\n", __func__, __LINE__,
			   gbl->curr_read_lat);
		return 1;
	} else {
		set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
					CAL_SUBSTAGE_READ_LATENCY);

		debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \
			   read_lat=%u\n", __func__, __LINE__,
			   gbl->curr_read_lat);
		return 0;
	}
}

/*
 * Issue a write test command.
 * Two variants are provided: one that just tests a write pattern and
 * another that tests datamask functionality.
 */
static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
	uint32_t test_dm)
{
	uint32_t mcc_instruction;
	uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
		ENABLE_SUPER_QUICK_CALIBRATION);
	uint32_t rw_wl_nop_cycles;
	uint32_t addr;

	/*
	 * Set counter and jump addresses for the right
	 * number of NOP cycles.
	 * The number of supported NOP cycles can range from -1 to infinity.
	 * Three different cases are handled:
	 *
	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
	 *    mechanism will be used to insert the right number of NOPs.
	 *
	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
	 *    issuing the write command will jump straight to the
	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
	 *    data (for RLD), skipping the NOP micro-instruction altogether.
	 *
	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
	 *    turned on in the same micro-instruction that issues the write
	 *    command. Then we need to directly jump to the micro-instruction
	 *    that sends out the data.
	 *
	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
	 *       (2 and 3). One jump-counter (0) is used to perform multiple
	 *       write-read operations. One counter is left to issue this
	 *       command in "multiple-group" mode.
	 */
	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;

	if (rw_wl_nop_cycles == -1) {
		/*
		 * CNTR 2 - We want to execute the special write operation that
		 * turns on DQS right away and then skip directly to the
		 * instruction that sends out the data. We set the counter to a
		 * large number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	} else if (rw_wl_nop_cycles == 0) {
		/*
		 * CNTR 2 - We want to skip the NOP operation and go straight
		 * to the DQS enable instruction. We set the counter to a large
		 * number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		}
	} else {
		/*
		 * CNTR 2 - In this case we want to execute the next instruction
		 * and NOT take the jump. So we set the counter to 0. The jump
		 * address doesn't count.
		 */
		writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		/*
		 * CNTR 3 - Set the nop counter to the number of cycles we
		 * need to loop for, minus 1.
		 */
		writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	}

	writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
		  RW_MGR_RESET_READ_DATAPATH_OFFSET);

	if (quick_write_mode)
		writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
	else
		writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);

	writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);

	/*
	 * CNTR 1 - This is used to ensure enough time elapses
	 * for read data to come back.
	 */
	writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);

	if (test_dm) {
		writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	} else {
		writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(mcc_instruction, addr + (group << 2));
}
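
/*
 * rw_mgr_mem_calibrate_write_test() mirrors the read test above: it
 * issues the LFSR write/read pattern per virtual write group,
 * accumulates per-DQ pass bits into tmp_bit_chk with the same
 * shift-and-OR scheme, and ANDs the result across ranks. all_correct
 * selects between requiring every bit to pass
 * (param->write_correct_mask) and accepting a single passing bit.
 */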

/* Test writes, can check for a single bit pass or multiple bit pass */
static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
	uint32_t *bit_chk, uint32_t all_ranks)
{
	uint32_t r;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t vg;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr_rw_mgr;
	uint32_t base_rw_mgr;

	*bit_chk = param->write_correct_mask;
	correct_mask_vg = param->write_correct_mask_vg;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		tmp_bit_chk = 0;
		addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);

			tmp_bit_chk = tmp_bit_chk <<
				(RW_MGR_MEM_DQ_PER_WRITE_DQS /
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
			rw_mgr_mem_calibrate_write_test_issue(write_group *
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg,
				use_dm);

			base_rw_mgr = readl(addr_rw_mgr);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == \
			   %u => %lu", write_group, use_dm,
			   *bit_chk, param->write_correct_mask,
			   (long unsigned int)(*bit_chk ==
			   param->write_correct_mask));
		return *bit_chk == param->write_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ",
			   write_group, use_dm, *bit_chk);
		debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0,
			   (long unsigned int)(*bit_chk != 0));
		return *bit_chk != 0x00;
	}
}
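
/*
 * The write centering below runs in three passes: a per-bit DQ out1
 * deskew (left edge found with DQ delays, right edge with DQS/OCT
 * delays), then a DM window search that first scans DM out1 delays from
 * max down to 0 and, if the window butts up against 0, continues by
 * shifting DQS; win_best/bgn_best/end_best track the largest contiguous
 * passing window found across both scans.
 */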
  2277. /*
  2278. * center all windows. do per-bit-deskew to possibly increase size of
  2279. * certain windows.
  2280. */
  2281. static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
  2282. uint32_t write_group, uint32_t test_bgn)
  2283. {
  2284. uint32_t i, p, min_index;
  2285. int32_t d;
  2286. /*
  2287. * Store these as signed since there are comparisons with
  2288. * signed numbers.
  2289. */
  2290. uint32_t bit_chk;
  2291. uint32_t sticky_bit_chk;
  2292. int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
  2293. int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
  2294. int32_t mid;
  2295. int32_t mid_min, orig_mid_min;
  2296. int32_t new_dqs, start_dqs, shift_dq;
  2297. int32_t dq_margin, dqs_margin, dm_margin;
  2298. uint32_t stop;
  2299. uint32_t temp_dq_out1_delay;
  2300. uint32_t addr;
  2301. debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
  2302. dm_margin = 0;
  2303. addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
  2304. start_dqs = readl(addr +
  2305. (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
  2306. /* per-bit deskew */
  2307. /*
  2308. * set the left and right edge of each bit to an illegal value
  2309. * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
  2310. */
  2311. sticky_bit_chk = 0;
  2312. for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
  2313. left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
  2314. right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
  2315. }
  2316. /* Search for the left edge of the window for each bit */
  2317. for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) {
  2318. scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, d);
  2319. writel(0, &sdr_scc_mgr->update);
  2320. /*
  2321. * Stop searching when the read test doesn't pass AND when
  2322. * we've seen a passing read on every bit.
  2323. */
  2324. stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
  2325. 0, PASS_ONE_BIT, &bit_chk, 0);
  2326. sticky_bit_chk = sticky_bit_chk | bit_chk;
  2327. stop = stop && (sticky_bit_chk == param->write_correct_mask);
  2328. debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => %u \
  2329. == %u && %u [bit_chk= %u ]\n",
  2330. d, sticky_bit_chk, param->write_correct_mask,
  2331. stop, bit_chk);
  2332. if (stop == 1) {
  2333. break;
  2334. } else {
  2335. for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
  2336. if (bit_chk & 1) {
  2337. /*
  2338. * Remember a passing test as the
  2339. * left_edge.
  2340. */
  2341. left_edge[i] = d;
  2342. } else {
  2343. /*
  2344. * If a left edge has not been seen
  2345. * yet, then a future passing test will
  2346. * mark this edge as the right edge.
  2347. */
  2348. if (left_edge[i] ==
  2349. IO_IO_OUT1_DELAY_MAX + 1) {
  2350. right_edge[i] = -(d + 1);
  2351. }
  2352. }
  2353. debug_cond(DLEVEL == 2, "write_center[l,d=%d):", d);
  2354. debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
  2355. (int)(bit_chk & 1), i, left_edge[i]);
  2356. debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
  2357. right_edge[i]);
  2358. bit_chk = bit_chk >> 1;
  2359. }
  2360. }
  2361. }
  2362. /* Reset DQ delay chains to 0 */
  2363. scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, 0);
  2364. sticky_bit_chk = 0;
  2365. for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) {
  2366. debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
  2367. %d right_edge[%u]: %d\n", __func__, __LINE__,
  2368. i, left_edge[i], i, right_edge[i]);
  2369. /*
  2370. * Check for cases where we haven't found the left edge,
  2371. * which makes our assignment of the the right edge invalid.
  2372. * Reset it to the illegal value.
  2373. */
  2374. if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) &&
  2375. (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) {
  2376. right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
  2377. debug_cond(DLEVEL == 2, "%s:%d write_center: reset \
  2378. right_edge[%u]: %d\n", __func__, __LINE__,
  2379. i, right_edge[i]);
  2380. }
  2381. /*
  2382. * Reset sticky bit (except for bits where we have
  2383. * seen the left edge).
  2384. */
  2385. sticky_bit_chk = sticky_bit_chk << 1;
  2386. if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1))
  2387. sticky_bit_chk = sticky_bit_chk | 1;
  2388. if (i == 0)
  2389. break;
  2390. }
  2391. /* Search for the right edge of the window for each bit */
  2392. for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) {
  2393. scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
  2394. d + start_dqs);
  2395. writel(0, &sdr_scc_mgr->update);
  2396. /*
  2397. * Stop searching when the read test doesn't pass AND when
  2398. * we've seen a passing read on every bit.
  2399. */
  2400. stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
  2401. 0, PASS_ONE_BIT, &bit_chk, 0);
  2402. sticky_bit_chk = sticky_bit_chk | bit_chk;
  2403. stop = stop && (sticky_bit_chk == param->write_correct_mask);
  2404. debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => %u == \
  2405. %u && %u\n", d, sticky_bit_chk,
  2406. param->write_correct_mask, stop);
  2407. if (stop == 1) {
  2408. if (d == 0) {
  2409. for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS;
  2410. i++) {
  2411. /* d = 0 failed, but it passed when
  2412. testing the left edge, so it must be
  2413. marginal, set it to -1 */
  2414. if (right_edge[i] ==
  2415. IO_IO_OUT1_DELAY_MAX + 1 &&
  2416. left_edge[i] !=
  2417. IO_IO_OUT1_DELAY_MAX + 1) {
  2418. right_edge[i] = -1;
  2419. }
  2420. }
  2421. }
  2422. break;
  2423. } else {
  2424. for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
  2425. if (bit_chk & 1) {
  2426. /*
  2427. * Remember a passing test as
  2428. * the right_edge.
  2429. */
  2430. right_edge[i] = d;
  2431. } else {
  2432. if (d != 0) {
  2433. /*
  2434. * If a right edge has not
  2435. * been seen yet, then a future
  2436. * passing test will mark this
  2437. * edge as the left edge.
  2438. */
  2439. if (right_edge[i] ==
  2440. IO_IO_OUT1_DELAY_MAX + 1)
  2441. left_edge[i] = -(d + 1);
  2442. } else {
  2443. /*
  2444. * d = 0 failed, but it passed
  2445. * when testing the left edge,
  2446. * so it must be marginal, set
  2447. * it to -1.
  2448. */
  2449. if (right_edge[i] ==
  2450. IO_IO_OUT1_DELAY_MAX + 1 &&
  2451. left_edge[i] !=
  2452. IO_IO_OUT1_DELAY_MAX + 1)
  2453. right_edge[i] = -1;
  2454. /*
  2455. * If a right edge has not been
  2456. * seen yet, then a future
  2457. * passing test will mark this
  2458. * edge as the left edge.
  2459. */
  2460. else if (right_edge[i] ==
  2461. IO_IO_OUT1_DELAY_MAX +
  2462. 1)
  2463. left_edge[i] = -(d + 1);
  2464. }
  2465. }
  2466. debug_cond(DLEVEL == 2, "write_center[r,d=%d):", d);
  2467. debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
  2468. (int)(bit_chk & 1), i, left_edge[i]);
  2469. debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
  2470. right_edge[i]);
  2471. bit_chk = bit_chk >> 1;
  2472. }
  2473. }
  2474. }
  2475. /* Check that all bits have a window */
  2476. for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
  2477. debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
  2478. %d right_edge[%u]: %d", __func__, __LINE__,
  2479. i, left_edge[i], i, right_edge[i]);
  2480. if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) ||
  2481. (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) {
  2482. set_failing_group_stage(test_bgn + i,
  2483. CAL_STAGE_WRITES,
  2484. CAL_SUBSTAGE_WRITES_CENTER);
  2485. return 0;
  2486. }
  2487. }
  2488. /* Find middle of window for each DQ bit */
  2489. mid_min = left_edge[0] - right_edge[0];
  2490. min_index = 0;
  2491. for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
  2492. mid = left_edge[i] - right_edge[i];
  2493. if (mid < mid_min) {
  2494. mid_min = mid;
  2495. min_index = i;
  2496. }
  2497. }
  2498. /*
  2499. * -mid_min/2 represents the amount that we need to move DQS.
  2500. * If mid_min is odd and positive we'll need to add one to
  2501. * make sure the rounding in further calculations is correct
  2502. * (always bias to the right), so just add 1 for all positive values.
  2503. */
  2504. if (mid_min > 0)
  2505. mid_min++;
  2506. mid_min = mid_min / 2;
  2507. debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
  2508. __LINE__, mid_min);
  2509. /* Determine the amount we can change DQS (which is -mid_min) */
  2510. orig_mid_min = mid_min;
  2511. new_dqs = start_dqs;
  2512. mid_min = 0;
  2513. debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d \
  2514. mid_min=%d\n", __func__, __LINE__, start_dqs, new_dqs, mid_min);
  2515. /* Initialize data for export structures */
  2516. dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
  2517. dq_margin = IO_IO_OUT1_DELAY_MAX + 1;
  2518. /* add delay to bring centre of all DQ windows to the same "level" */
  2519. for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
  2520. /* Use values before divide by 2 to reduce round off error */
  2521. shift_dq = (left_edge[i] - right_edge[i] -
  2522. (left_edge[min_index] - right_edge[min_index]))/2 +
  2523. (orig_mid_min - mid_min);
  2524. debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq \
  2525. [%u]=%d\n", __func__, __LINE__, i, shift_dq);
  2526. addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
  2527. temp_dq_out1_delay = readl(addr + (i << 2));
  2528. if (shift_dq + (int32_t)temp_dq_out1_delay >
  2529. (int32_t)IO_IO_OUT1_DELAY_MAX) {
  2530. shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay;
  2531. } else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
  2532. shift_dq = -(int32_t)temp_dq_out1_delay;
  2533. }
  2534. debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n",
  2535. i, shift_dq);
  2536. scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq);
  2537. scc_mgr_load_dq(i);
  2538. debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
  2539. left_edge[i] - shift_dq + (-mid_min),
  2540. right_edge[i] + shift_dq - (-mid_min));
  2541. /* To determine values for export structures */
  2542. if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
  2543. dq_margin = left_edge[i] - shift_dq + (-mid_min);
  2544. if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
  2545. dqs_margin = right_edge[i] + shift_dq - (-mid_min);
  2546. }
	/* Move DQS */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
	writel(0, &sdr_scc_mgr->update);

	/* Centre DM */
	debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);

	/*
	 * Set the left and right edge of each bit to an illegal value.
	 * Use (IO_IO_OUT1_DELAY_MAX + 1) as the illegal value.
	 */
	left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
	right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t win_best = 0;
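	/*
	 * The DM window is swept in two phases. First, DM out1 delay is
	 * increased, which covers negative window positions (recorded
	 * as -d); then, with DM back at 0, DQS is pushed out to cover
	 * positive positions. win_best/bgn_best/end_best track the widest
	 * passing region seen across both sweeps.
	 */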
	/* Search for the (part of the) window reachable with DM delay */
	for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
		scc_mgr_apply_group_dm_out1_delay(write_group, d);
		writel(0, &sdr_scc_mgr->update);

		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* Set current end of the window */
			end_curr = -d;
			/*
			 * If a starting edge of our window has not been seen,
			 * this is our current start of the DM window.
			 */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = -d;

			/*
			 * If the current window is bigger than the best seen,
			 * set the best seen to be the current window.
			 */
			if ((end_curr - bgn_curr + 1) > win_best) {
				win_best = end_curr - bgn_curr + 1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edges. */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
		}
	}
	/* Reset DM delay chains to 0 */
	scc_mgr_apply_group_dm_out1_delay(write_group, 0);

	/*
	 * Check to see if the current window nudges up against 0 delay.
	 * If so, we need to continue the search by shifting DQS; otherwise
	 * the DQS search begins as a new search.
	 */
	if (end_curr != 0) {
		bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
		end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	}
	/* Search for the (part of the) window reachable with DQS shifts */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
		/*
		 * Note: this only shifts DQS, so we may be limiting
		 * ourselves to the width of DQ unnecessarily.
		 */
		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
							d + new_dqs);

		writel(0, &sdr_scc_mgr->update);
		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* Set current end of the window */
			end_curr = d;
			/*
			 * If a beginning edge of our window has not been
			 * seen, this is our current beginning of the DM
			 * window.
			 */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = d;

			/*
			 * If the current window is bigger than the best
			 * seen, set the best seen to be the current window.
			 */
			if ((end_curr - bgn_curr + 1) > win_best) {
				win_best = end_curr - bgn_curr + 1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edges. */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;

			/*
			 * Early exit optimization: if the remaining delay
			 * chain space is less than the largest window
			 * already seen, we can exit.
			 */
			if ((win_best - 1) >
			    (IO_IO_OUT1_DELAY_MAX - new_dqs - d)) {
				break;
			}
		}
	}
	/* Assign left and right edges for cal and reporting. */
	left_edge[0] = -1 * bgn_best;
	right_edge[0] = end_best;

	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
		   __LINE__, left_edge[0], right_edge[0]);

	/* Move DQS (back to orig) */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);

	/* Move DM */

	/* Find middle of window for the DM bit */
	mid = (left_edge[0] - right_edge[0]) / 2;

	/* Only move right, since we are not moving DQS/DQ */
	if (mid < 0)
		mid = 0;

	/* dm_margin should fail if we never find a window */
	if (win_best == 0)
		dm_margin = -1;
	else
		dm_margin = left_edge[0] - mid;

	scc_mgr_apply_group_dm_out1_delay(write_group, mid);
	writel(0, &sdr_scc_mgr->update);

	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d \
		   dm_margin=%d\n", __func__, __LINE__, left_edge[0],
		   right_edge[0], mid, dm_margin);
	/* Export values */
	gbl->fom_out += dq_margin + dqs_margin;

	debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d \
		   dqs_margin=%d dm_margin=%d\n", __func__, __LINE__,
		   dq_margin, dqs_margin, dm_margin);

	/*
	 * Do not remove this line as it makes sure all of our
	 * decisions have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
}
/* calibrate the write operations */
static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
					    uint32_t test_bgn)
{
	/* update info for sims */
	debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn);

	reg_file_set_stage(CAL_STAGE_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);

	reg_file_set_group(g);

	if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) {
		set_failing_group_stage(g, CAL_STAGE_WRITES,
					CAL_SUBSTAGE_WRITES_CENTER);
		return 0;
	}

	return 1;
}
/* precharge all banks and activate row 0 in bank "000..." and bank "111..." */
static void mem_precharge_and_activate(void)
{
	uint32_t r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					     RW_MGR_RUN_SINGLE_GROUP_OFFSET);
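		/*
		 * The two load_cntr/load_jump_add pairs below appear to
		 * program the RW manager's loop counters so that each
		 * ACTIVATE_0_AND_1_WAIT sequence is repeated 0x0F times
		 * before execution moves on (an inference from the register
		 * naming, not from documentation).
		 */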
		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* activate rows */
		writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}
}
/* Configure various memory related parameters. */
static void mem_config(void)
{
	uint32_t rlat, wlat;
	uint32_t rw_wl_nop_cycles;
	uint32_t max_latency;

	debug("%s:%d\n", __func__, __LINE__);

	/* read in write and read latency */
	wlat = readl(&data_mgr->t_wl_add);
	wlat += readl(&data_mgr->mem_t_add);

	/* WL for hard phy does not include additive latency */

	/*
	 * add additional write latency to offset the address/command extra
	 * clock cycle. We change the AC mux setting causing AC to be delayed
	 * by one mem clock cycle. Only do this for DDR3.
	 */
	wlat = wlat + 1;

	rlat = readl(&data_mgr->t_rl_add);

	rw_wl_nop_cycles = wlat - 2;
	gbl->rw_wl_nop_cycles = rw_wl_nop_cycles;

	/*
	 * For AV/CV, lfifo is hardened and always runs at full rate so
	 * max latency in AFI clocks, used here, is correspondingly smaller.
	 */
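	/*
	 * The "/1" in the expression below is the full-rate clock divisor
	 * referred to in the comment above. With MAX_LATENCY_COUNT_WIDTH = 5,
	 * for example, this would give (1 << 5) / 1 - 1 = 31 AFI clocks
	 * (the width value is illustrative, not taken from this file).
	 */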
	max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) / 1 - 1;

	/* configure for a burst length of 8 */

	/* write latency */
	/* Adjust Write Latency for Hard PHY */
	wlat = wlat + 1;

	/* set a pretty high read latency initially */
	gbl->curr_read_lat = rlat + 16;

	if (gbl->curr_read_lat > max_latency)
		gbl->curr_read_lat = max_latency;

	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);

	/* advertise write latency */
	gbl->curr_write_lat = wlat;
	writel(wlat - 2, &phy_mgr_cfg->afi_wlat);

	/* initialize bit slips */
	mem_precharge_and_activate();
}
/* Set VFIFO and LFIFO to instant-on settings in skip calibration mode */
static void mem_skip_calibrate(void)
{
	uint32_t vfifo_offset;
	uint32_t i, j, r;

	debug("%s:%d\n", __func__, __LINE__);

	/* Need to update every shadow register set used by the interface */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/*
		 * Set output phase alignment settings appropriate for
		 * skip calibration.
		 */
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			scc_mgr_set_dqs_en_phase(i, 0);
#if IO_DLL_CHAIN_LENGTH == 6
			scc_mgr_set_dqdqs_output_phase(i, 6);
#else
			scc_mgr_set_dqdqs_output_phase(i, 7);
#endif
			/*
			 * Case:33398
			 *
			 * Write data arrives to the I/O two cycles before
			 * write latency is reached (720 deg).
			 *   -> due to bit-slip in a/c bus
			 *   -> to allow board skew where dqs is longer than ck
			 *   -> how often can this happen!?
			 *   -> can claim back some ptaps for high freq
			 *      support if we can relax this, but i digress...
			 *
			 * The write_clk leads mem_ck by 90 deg.
			 * The minimum ptap of the OPA is 180 deg.
			 * Each ptap has (360 / IO_DLL_CHAIN_LENGTH) deg of
			 * delay. The write_clk is always delayed by 2 ptaps.
			 *
			 * Hence, to make DQS aligned to CK, we need to delay
			 * DQS by:
			 *   (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
			 *
			 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
			 * gives us the number of ptaps, which simplifies to:
			 *   (1.25 * IO_DLL_CHAIN_LENGTH - 2)
			 */
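			/*
			 * Sanity check with an assumed chain length of 8
			 * (illustrative only): each ptap is 360 / 8 = 45 deg,
			 * the required delay is 720 - 90 - 180 - 2 * 45 =
			 * 360 deg, and 360 / 45 = 8 ptaps, which indeed
			 * equals 1.25 * 8 - 2.
			 */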
			scc_mgr_set_dqdqs_output_phase(i, (1.25 *
				IO_DLL_CHAIN_LENGTH - 2));
		}
		writel(0xff, &sdr_scc_mgr->dqs_ena);
		writel(0xff, &sdr_scc_mgr->dqs_io_ena);

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
				  SCC_MGR_GROUP_COUNTER_OFFSET);
		}
		writel(0xff, &sdr_scc_mgr->dq_ena);
		writel(0xff, &sdr_scc_mgr->dm_ena);
		writel(0, &sdr_scc_mgr->update);
	}

	/* Compensate for simulation model behaviour */
	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
		scc_mgr_set_dqs_bus_in_delay(i, 10);
		scc_mgr_load_dqs(i);
	}
	writel(0, &sdr_scc_mgr->update);

	/*
	 * ArriaV has hard FIFOs that can only be initialized by incrementing
	 * in sequencer.
	 */
	vfifo_offset = CALIB_VFIFO_OFFSET;
	for (j = 0; j < vfifo_offset; j++) {
		writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
	}
	writel(0, &phy_mgr_cmd->fifo_reset);

	/*
	 * For ACV with hard lfifo, we get the skip-cal setting from
	 * generation-time constant.
	 */
	gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
}
/* Memory calibration entry point */
static uint32_t mem_calibrate(void)
{
	uint32_t i;
	uint32_t rank_bgn, sr;
	uint32_t write_group, write_test_bgn;
	uint32_t read_group, read_test_bgn;
	uint32_t run_groups, current_run;
	uint32_t failing_groups = 0;
	uint32_t group_failed = 0;
	uint32_t sr_failed = 0;

	debug("%s:%d\n", __func__, __LINE__);

	/* Initialize the data settings */
	gbl->error_substage = CAL_SUBSTAGE_NIL;
	gbl->error_stage = CAL_STAGE_NIL;
	gbl->error_group = 0xff;
	gbl->fom_in = 0;
	gbl->fom_out = 0;

	mem_config();

	uint32_t bypass_mode = 0x1;
	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
		writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
			  SCC_MGR_GROUP_COUNTER_OFFSET);
		scc_set_bypass_mode(i, bypass_mode);
	}
	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
		/*
		 * Set VFIFO and LFIFO to instant-on settings in skip
		 * calibration mode.
		 */
		mem_skip_calibrate();
	} else {
		for (i = 0; i < NUM_CALIB_REPEAT; i++) {
			/*
			 * Zero all delay chain/phase settings for all
			 * groups and all shadow register sets.
			 */
			scc_mgr_zero_all();

			run_groups = ~param->skip_groups;

			for (write_group = 0, write_test_bgn = 0; write_group
			     < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
			     write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
				/* Initialize the group failure flag */
				group_failed = 0;

				current_run = run_groups & ((1 <<
					RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
				run_groups = run_groups >>
					RW_MGR_NUM_DQS_PER_WRITE_GROUP;

				if (current_run == 0)
					continue;

				writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
						    SCC_MGR_GROUP_COUNTER_OFFSET);
				scc_mgr_zero_group(write_group, write_test_bgn,
						   0);
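				/*
				 * Each write group covers
				 * RW_MGR_MEM_IF_READ_DQS_WIDTH /
				 * RW_MGR_MEM_IF_WRITE_DQS_WIDTH read groups.
				 * For example, a 2:1 ratio (assumed here for
				 * illustration) would map write group 1 to
				 * read groups 2 and 3; the read-side loops
				 * below iterate over exactly that slice.
				 */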
				for (read_group = write_group *
				     RW_MGR_MEM_IF_READ_DQS_WIDTH /
				     RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
				     read_test_bgn = 0;
				     read_group < (write_group + 1) *
				     RW_MGR_MEM_IF_READ_DQS_WIDTH /
				     RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
				     group_failed == 0;
				     read_group++, read_test_bgn +=
				     RW_MGR_MEM_DQ_PER_READ_DQS) {
					/* Calibrate the VFIFO */
					if (!((STATIC_CALIB_STEPS) &
					      CALIB_SKIP_VFIFO)) {
						if (!rw_mgr_mem_calibrate_vfifo
						    (read_group,
						     read_test_bgn)) {
							group_failed = 1;

							if (!(gbl->phy_debug_mode_flags &
							      PHY_DEBUG_SWEEP_ALL_GROUPS)) {
								return 0;
							}
						}
					}
				}
				/* Calibrate the output side */
				if (group_failed == 0) {
					for (rank_bgn = 0, sr = 0; rank_bgn
					     < RW_MGR_MEM_NUMBER_OF_RANKS;
					     rank_bgn +=
					     NUM_RANKS_PER_SHADOW_REG,
					     ++sr) {
						sr_failed = 0;
						if (!((STATIC_CALIB_STEPS) &
						      CALIB_SKIP_WRITES)) {
							if ((STATIC_CALIB_STEPS)
							    & CALIB_SKIP_DELAY_SWEEPS) {
								/* not needed in quick mode! */
							} else {
								/*
								 * Determine if this set of
								 * ranks should be skipped
								 * entirely.
								 */
								if (!param->skip_shadow_regs[sr]) {
									if (!rw_mgr_mem_calibrate_writes
									    (rank_bgn, write_group,
									     write_test_bgn)) {
										sr_failed = 1;
										if (!(gbl->phy_debug_mode_flags &
										      PHY_DEBUG_SWEEP_ALL_GROUPS)) {
											return 0;
										}
									}
								}
							}
						}
						if (sr_failed != 0)
							group_failed = 1;
					}
				}
				if (group_failed == 0) {
					for (read_group = write_group *
					     RW_MGR_MEM_IF_READ_DQS_WIDTH /
					     RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
					     read_test_bgn = 0;
					     read_group < (write_group + 1) *
					     RW_MGR_MEM_IF_READ_DQS_WIDTH /
					     RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
					     group_failed == 0;
					     read_group++, read_test_bgn +=
					     RW_MGR_MEM_DQ_PER_READ_DQS) {
						if (!((STATIC_CALIB_STEPS) &
						      CALIB_SKIP_WRITES)) {
							if (!rw_mgr_mem_calibrate_vfifo_end
							    (read_group, read_test_bgn)) {
								group_failed = 1;

								if (!(gbl->phy_debug_mode_flags
								      & PHY_DEBUG_SWEEP_ALL_GROUPS)) {
									return 0;
								}
							}
						}
					}
				}

				if (group_failed != 0)
					failing_groups++;
			}
			/*
			 * If there are any failing groups then report
			 * the failure.
			 */
			if (failing_groups != 0)
				return 0;

			/* Calibrate the LFIFO */
			if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) {
				/*
				 * If we're skipping groups as part of debug,
				 * don't calibrate LFIFO.
				 */
				if (param->skip_groups == 0) {
					if (!rw_mgr_mem_calibrate_lfifo())
						return 0;
				}
			}
		}
	}

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return 1;
}
static uint32_t run_mem_calibrate(void)
{
	uint32_t pass;
	uint32_t debug_info;

	debug("%s:%d\n", __func__, __LINE__);

	/* Reset pass/fail status shown on afi_cal_success/fail */
	writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);

	/* Stop tracking manager */
	uint32_t ctrlcfg = readl(&sdr_ctrl->ctrl_cfg);
	writel(ctrlcfg & 0xFFBFFFFF, &sdr_ctrl->ctrl_cfg);
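	/*
	 * 0xFFBFFFFF is ~(1 << 22): the write above clears only bit 22 of
	 * ctrl_cfg, which this code treats as the tracking-manager enable
	 * (the bit's meaning is inferred from the surrounding comment, not
	 * from a register map). The saved ctrlcfg value is restored once
	 * calibration finishes below.
	 */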
	initialize();
	rw_mgr_mem_initialize();

	pass = mem_calibrate();

	mem_precharge_and_activate();
	writel(0, &phy_mgr_cmd->fifo_reset);

	/*
	 * Handoff:
	 * Don't return control of the PHY back to AFI when in debug mode.
	 */
	if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) {
		rw_mgr_mem_handoff();
		/*
		 * In Hard PHY this is a 2-bit control:
		 * 0: AFI Mux Select
		 * 1: DDIO Mux Select
		 */
		writel(0x2, &phy_mgr_cfg->mux_sel);
	}

	writel(ctrlcfg, &sdr_ctrl->ctrl_cfg);

	if (pass) {
		printf("%s: CALIBRATION PASSED\n", __FILE__);

		gbl->fom_in /= 2;
		gbl->fom_out /= 2;

		if (gbl->fom_in > 0xff)
			gbl->fom_in = 0xff;

		if (gbl->fom_out > 0xff)
			gbl->fom_out = 0xff;

		/* Update the FOM in the register file */
		debug_info = gbl->fom_in;
		debug_info |= gbl->fom_out << 8;
		writel(debug_info, &sdr_reg_file->fom);
		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
	} else {
		printf("%s: CALIBRATION FAILED\n", __FILE__);

		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;

		writel(debug_info, &sdr_reg_file->failing_stage);
		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);

		/* Update the failing group/stage in the register file */
		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;
		writel(debug_info, &sdr_reg_file->failing_stage);
	}

	return pass;
}
/**
 * hc_initialize_rom_data() - Initialize ROM data
 *
 * Initialize ROM data.
 */
static void hc_initialize_rom_data(void)
{
	u32 i, addr;

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
	for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
		writel(inst_rom_init[i], addr + (i << 2));

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
	for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
		writel(ac_rom_init[i], addr + (i << 2));
}
/**
 * initialize_reg_file() - Initialize SDR register file
 *
 * Initialize SDR register file.
 */
static void initialize_reg_file(void)
{
	/* Initialize the register file with the correct data */
	writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
	writel(0, &sdr_reg_file->debug_data_addr);
	writel(0, &sdr_reg_file->cur_stage);
	writel(0, &sdr_reg_file->fom);
	writel(0, &sdr_reg_file->failing_stage);
	writel(0, &sdr_reg_file->debug1);
	writel(0, &sdr_reg_file->debug2);
}
/**
 * initialize_hps_phy() - Initialize HPS PHY
 *
 * Initialize HPS PHY.
 */
static void initialize_hps_phy(void)
{
	uint32_t reg;
	/*
	 * Tracking also gets configured here because it's in the
	 * same register.
	 */
	uint32_t trk_sample_count = 7500;
	uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
	/*
	 * Format is number of outer loops in the 16 MSB, sample
	 * count in 16 LSB.
	 */
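	/*
	 * For example, (10 << 16) | 100 above encodes 10 outer loops in the
	 * upper half-word and a 100-sample count in the lower half-word.
	 */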
	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
	/*
	 * This field selects the intrinsic latency to RDATA_EN/FULL path.
	 * 00 - bypass, 01 - add 5 cycles, 10 - add 10 cycles, 11 - add 15 cycles.
	 */
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
		trk_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl0);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
		trk_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
		trk_long_idle_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl1);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
		trk_long_idle_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
	writel(reg, &sdr_ctrl->phy_ctrl2);
}
static void initialize_tracking(void)
{
	uint32_t concatenated_longidle = 0x0;
	uint32_t concatenated_delays = 0x0;
	uint32_t concatenated_rw_addr = 0x0;
	uint32_t concatenated_refresh = 0x0;
	uint32_t trk_sample_count = 7500;
	uint32_t dtaps_per_ptap;
	uint32_t tmp_delay;

	/*
	 * compute usable version of value in case we skip full
	 * computation later
	 */
	dtaps_per_ptap = 0;
	tmp_delay = 0;
	while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
		dtaps_per_ptap++;
		tmp_delay += IO_DELAY_PER_DCHAIN_TAP;
	}
	dtaps_per_ptap--;
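	/*
	 * The loop above effectively computes
	 * ceil(IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DCHAIN_TAP) - 1, i.e. the
	 * number of whole delay-chain taps that fit inside one phase tap.
	 * With assumed values of 2500 ps per ptap and 100 ps per dtap, for
	 * instance, this gives 25 - 1 = 24.
	 */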
	concatenated_longidle = concatenated_longidle ^ 10;
		/* longidle outer loop */
	concatenated_longidle = concatenated_longidle << 16;
	concatenated_longidle = concatenated_longidle ^ 100;
		/* longidle sample count */

	concatenated_delays = concatenated_delays ^ 243;
		/* trfc, worst case of 933MHz 4Gb */
	concatenated_delays = concatenated_delays << 8;
	concatenated_delays = concatenated_delays ^ 14;
		/* trcd, worst case */
	concatenated_delays = concatenated_delays << 8;
	concatenated_delays = concatenated_delays ^ 10;
		/* vfifo wait */
	concatenated_delays = concatenated_delays << 8;
	concatenated_delays = concatenated_delays ^ 4;
		/* mux delay */

	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_IDLE;
	concatenated_rw_addr = concatenated_rw_addr << 8;
	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_ACTIVATE_1;
	concatenated_rw_addr = concatenated_rw_addr << 8;
	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_SGLE_READ;
	concatenated_rw_addr = concatenated_rw_addr << 8;
	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_PRECHARGE_ALL;

	concatenated_refresh = concatenated_refresh ^ RW_MGR_REFRESH_ALL;
	concatenated_refresh = concatenated_refresh << 24;
	concatenated_refresh = concatenated_refresh ^ 1000; /* trefi */
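	/*
	 * Resulting layouts (derived from the shift/XOR sequences above):
	 *   concatenated_longidle = (10 << 16) | 100
	 *   concatenated_delays   = (243 << 24) | (14 << 16) | (10 << 8) | 4
	 *   concatenated_rw_addr  = (RW_MGR_IDLE << 24) |
	 *                           (RW_MGR_ACTIVATE_1 << 16) |
	 *                           (RW_MGR_SGLE_READ << 8) |
	 *                           RW_MGR_PRECHARGE_ALL
	 *   concatenated_refresh  = (RW_MGR_REFRESH_ALL << 24) | 1000
	 * (assuming each field fits in its slot, as the XOR-into-zero idiom
	 * requires).
	 */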
	/* Initialize the register file with the correct data */
	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	writel(trk_sample_count, &sdr_reg_file->trk_sample_count);
	writel(concatenated_longidle, &sdr_reg_file->trk_longidle);
	writel(concatenated_delays, &sdr_reg_file->delays);
	writel(concatenated_rw_addr, &sdr_reg_file->trk_rw_mgr_addr);
	writel(RW_MGR_MEM_IF_READ_DQS_WIDTH, &sdr_reg_file->trk_read_dqs_width);
	writel(concatenated_refresh, &sdr_reg_file->trk_rfsh);
}
int sdram_calibration_full(void)
{
	struct param_type my_param;
	struct gbl_type my_gbl;
	uint32_t pass;
	uint32_t i;

	param = &my_param;
	gbl = &my_gbl;

	/* Initialize the debug mode flags */
	gbl->phy_debug_mode_flags = 0;
	/* Set the calibration enabled by default */
	gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
	/*
	 * Only sweep all groups (regardless of fail state) by default.
	 * Set enabled read test by default.
	 */
#if DISABLE_GUARANTEED_READ
	gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
#endif
	/* Initialize the register file */
	initialize_reg_file();

	/* Initialize any PHY CSR */
	initialize_hps_phy();

	scc_mgr_initialize();
	initialize_tracking();

	/* Enable all ranks, groups */
	for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++)
		param->skip_ranks[i] = 0;
	for (i = 0; i < NUM_SHADOW_REGS; ++i)
		param->skip_shadow_regs[i] = 0;
	param->skip_groups = 0;

	printf("%s: Preparing to start memory calibration\n", __FILE__);

	debug("%s:%d\n", __func__, __LINE__);
	debug_cond(DLEVEL == 1,
		   "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
		   RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
		   RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
	debug_cond(DLEVEL == 1,
		   "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
		   RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
		   RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
		   IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
	debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
		   IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
	debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
		   IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
		   IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
	debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
		   IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
		   IO_IO_OUT2_DELAY_MAX);
	debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
		   IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);

	hc_initialize_rom_data();

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_NIL);
	reg_file_set_group(0);

	/*
	 * Load global needed for those actions that require
	 * some dynamic calibration support.
	 */
	dyn_calib_steps = STATIC_CALIB_STEPS;
	/*
	 * Load global to allow dynamic selection of delay loop settings
	 * based on calibration mode.
	 */
	if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
		skip_delay_mask = 0xff;
	else
		skip_delay_mask = 0x0;

	pass = run_mem_calibrate();

	printf("%s: Calibration complete\n", __FILE__);
	return pass;
}