ivc.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2016, NVIDIA CORPORATION.
  4. */
  5. #include <common.h>
  6. #include <asm/io.h>
  7. #include <asm/arch-tegra/ivc.h>
  8. #define TEGRA_IVC_ALIGN 64
/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 * The peer reads it through its rx_channel pointer (the two headers are
 * cross-mapped), so a state written here is the state the remote end observes.
 */
enum ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync or
	 * ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that has cleared the counters in our
	 * rx_channel.
	 */
	ivc_state_established = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	ivc_state_sync,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the ack
	 * state. If the remote endpoint observes us in the ack state, it can
	 * return to the established state once it has cleared its counters.
	 */
	ivc_state_ack
};
/*
 * This structure is divided into two-cache aligned parts, the first is only
 * written through the tx_channel pointer, while the second is only written
 * through the rx_channel pointer. This delineates ownership of the cache lines,
 * which is critical to performance and necessary in non-cache coherent
 * implementations.
 */
struct tegra_ivc_channel_header {
	union {
		/* fields owned by the transmitting end */
		struct {
			uint32_t w_count;	/* total frames written */
			uint32_t state;		/* enum ivc_state of this end */
		};
		/* pad the tx-owned fields out to a full cache line */
		uint8_t w_align[TEGRA_IVC_ALIGN];
	};
	union {
		/* fields owned by the receiving end */
		uint32_t r_count;	/* total frames consumed */
		/* pad the rx-owned field out to a full cache line */
		uint8_t r_align[TEGRA_IVC_ALIGN];
	};
};
  64. static inline void tegra_ivc_invalidate_counter(struct tegra_ivc *ivc,
  65. struct tegra_ivc_channel_header *h,
  66. ulong offset)
  67. {
  68. ulong base = ((ulong)h) + offset;
  69. invalidate_dcache_range(base, base + TEGRA_IVC_ALIGN);
  70. }
  71. static inline void tegra_ivc_flush_counter(struct tegra_ivc *ivc,
  72. struct tegra_ivc_channel_header *h,
  73. ulong offset)
  74. {
  75. ulong base = ((ulong)h) + offset;
  76. flush_dcache_range(base, base + TEGRA_IVC_ALIGN);
  77. }
  78. static inline ulong tegra_ivc_frame_addr(struct tegra_ivc *ivc,
  79. struct tegra_ivc_channel_header *h,
  80. uint32_t frame)
  81. {
  82. BUG_ON(frame >= ivc->nframes);
  83. return ((ulong)h) + sizeof(struct tegra_ivc_channel_header) +
  84. (ivc->frame_size * frame);
  85. }
  86. static inline void *tegra_ivc_frame_pointer(struct tegra_ivc *ivc,
  87. struct tegra_ivc_channel_header *ch,
  88. uint32_t frame)
  89. {
  90. return (void *)tegra_ivc_frame_addr(ivc, ch, frame);
  91. }
  92. static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
  93. struct tegra_ivc_channel_header *h,
  94. unsigned frame)
  95. {
  96. ulong base = tegra_ivc_frame_addr(ivc, h, frame);
  97. invalidate_dcache_range(base, base + ivc->frame_size);
  98. }
  99. static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
  100. struct tegra_ivc_channel_header *h,
  101. unsigned frame)
  102. {
  103. ulong base = tegra_ivc_frame_addr(ivc, h, frame);
  104. flush_dcache_range(base, base + ivc->frame_size);
  105. }
  106. static inline int tegra_ivc_channel_empty(struct tegra_ivc *ivc,
  107. struct tegra_ivc_channel_header *ch)
  108. {
  109. /*
  110. * This function performs multiple checks on the same values with
  111. * security implications, so create snapshots with ACCESS_ONCE() to
  112. * ensure that these checks use the same values.
  113. */
  114. uint32_t w_count = ACCESS_ONCE(ch->w_count);
  115. uint32_t r_count = ACCESS_ONCE(ch->r_count);
  116. /*
  117. * Perform an over-full check to prevent denial of service attacks where
  118. * a server could be easily fooled into believing that there's an
  119. * extremely large number of frames ready, since receivers are not
  120. * expected to check for full or over-full conditions.
  121. *
  122. * Although the channel isn't empty, this is an invalid case caused by
  123. * a potentially malicious peer, so returning empty is safer, because it
  124. * gives the impression that the channel has gone silent.
  125. */
  126. if (w_count - r_count > ivc->nframes)
  127. return 1;
  128. return w_count == r_count;
  129. }
  130. static inline int tegra_ivc_channel_full(struct tegra_ivc *ivc,
  131. struct tegra_ivc_channel_header *ch)
  132. {
  133. /*
  134. * Invalid cases where the counters indicate that the queue is over
  135. * capacity also appear full.
  136. */
  137. return (ACCESS_ONCE(ch->w_count) - ACCESS_ONCE(ch->r_count)) >=
  138. ivc->nframes;
  139. }
  140. static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
  141. {
  142. ACCESS_ONCE(ivc->rx_channel->r_count) =
  143. ACCESS_ONCE(ivc->rx_channel->r_count) + 1;
  144. if (ivc->r_pos == ivc->nframes - 1)
  145. ivc->r_pos = 0;
  146. else
  147. ivc->r_pos++;
  148. }
  149. static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
  150. {
  151. ACCESS_ONCE(ivc->tx_channel->w_count) =
  152. ACCESS_ONCE(ivc->tx_channel->w_count) + 1;
  153. if (ivc->w_pos == ivc->nframes - 1)
  154. ivc->w_pos = 0;
  155. else
  156. ivc->w_pos++;
  157. }
  158. static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
  159. {
  160. ulong offset;
  161. /*
  162. * tx_channel->state is set locally, so it is not synchronized with
  163. * state from the remote peer. The remote peer cannot reset its
  164. * transmit counters until we've acknowledged its synchronization
  165. * request, so no additional synchronization is required because an
  166. * asynchronous transition of rx_channel->state to ivc_state_ack is not
  167. * allowed.
  168. */
  169. if (ivc->tx_channel->state != ivc_state_established)
  170. return -ECONNRESET;
  171. /*
  172. * Avoid unnecessary invalidations when performing repeated accesses to
  173. * an IVC channel by checking the old queue pointers first.
  174. * Synchronization is only necessary when these pointers indicate empty
  175. * or full.
  176. */
  177. if (!tegra_ivc_channel_empty(ivc, ivc->rx_channel))
  178. return 0;
  179. offset = offsetof(struct tegra_ivc_channel_header, w_count);
  180. tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);
  181. return tegra_ivc_channel_empty(ivc, ivc->rx_channel) ? -ENOMEM : 0;
  182. }
  183. static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
  184. {
  185. ulong offset;
  186. if (ivc->tx_channel->state != ivc_state_established)
  187. return -ECONNRESET;
  188. if (!tegra_ivc_channel_full(ivc, ivc->tx_channel))
  189. return 0;
  190. offset = offsetof(struct tegra_ivc_channel_header, r_count);
  191. tegra_ivc_invalidate_counter(ivc, ivc->tx_channel, offset);
  192. return tegra_ivc_channel_full(ivc, ivc->tx_channel) ? -ENOMEM : 0;
  193. }
  194. static inline uint32_t tegra_ivc_channel_avail_count(struct tegra_ivc *ivc,
  195. struct tegra_ivc_channel_header *ch)
  196. {
  197. /*
  198. * This function isn't expected to be used in scenarios where an
  199. * over-full situation can lead to denial of service attacks. See the
  200. * comment in tegra_ivc_channel_empty() for an explanation about
  201. * special over-full considerations.
  202. */
  203. return ACCESS_ONCE(ch->w_count) - ACCESS_ONCE(ch->r_count);
  204. }
  205. int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, void **frame)
  206. {
  207. int result = tegra_ivc_check_read(ivc);
  208. if (result < 0)
  209. return result;
  210. /*
  211. * Order observation of w_pos potentially indicating new data before
  212. * data read.
  213. */
  214. mb();
  215. tegra_ivc_invalidate_frame(ivc, ivc->rx_channel, ivc->r_pos);
  216. *frame = tegra_ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);
  217. return 0;
  218. }
/*
 * Consume the frame previously obtained with tegra_ivc_read_get_next_frame(),
 * publish the new r_count to the peer, and notify the peer if this freed a
 * slot in a previously-full queue.
 *
 * Returns 0 on success, or the error from tegra_ivc_check_read().
 */
int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	ulong offset;
	int result;

	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	result = tegra_ivc_check_read(ivc);
	if (result)
		return result;

	/* Advance r_count/r_pos, then make the new r_count visible. */
	tegra_ivc_advance_rx(ivc);
	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_flush_counter(ivc, ivc->rx_channel, offset);

	/*
	 * Ensure our write to r_pos occurs before our read from w_pos.
	 */
	mb();

	/*
	 * Re-read the peer's w_count; if exactly nframes - 1 frames remain,
	 * the queue was full before this advance, so the writer may be
	 * blocked and needs a notification.
	 */
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);

	if (tegra_ivc_channel_avail_count(ivc, ivc->rx_channel) ==
	    ivc->nframes - 1)
		ivc->notify(ivc);

	return 0;
}
  245. int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, void **frame)
  246. {
  247. int result = tegra_ivc_check_write(ivc);
  248. if (result)
  249. return result;
  250. *frame = tegra_ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);
  251. return 0;
  252. }
/*
 * Commit the frame previously filled in via tegra_ivc_write_get_next_frame(),
 * publish the new w_count to the peer, and notify the peer if the queue was
 * previously empty (the reader may be waiting).
 *
 * Returns 0 on success, or the error from tegra_ivc_check_write().
 */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	ulong offset;
	int result;

	result = tegra_ivc_check_write(ivc);
	if (result)
		return result;

	/* Push the frame contents out of the cache before publishing it. */
	tegra_ivc_flush_frame(ivc, ivc->tx_channel, ivc->w_pos);

	/*
	 * Order any possible stores to the frame before update of w_pos.
	 */
	mb();

	/* Advance w_count/w_pos, then make the new w_count visible. */
	tegra_ivc_advance_tx(ivc);
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

	/*
	 * Ensure our write to w_pos occurs before our read from r_pos.
	 */
	mb();

	/*
	 * Re-read the peer's r_count; an outstanding count of exactly 1
	 * means the queue was empty before this advance, so the reader may
	 * be blocked and needs a notification.
	 */
	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_invalidate_counter(ivc, ivc->tx_channel, offset);

	if (tegra_ivc_channel_avail_count(ivc, ivc->tx_channel) == 1)
		ivc->notify(ivc);

	return 0;
}
  278. /*
  279. * ===============================================================
  280. * IVC State Transition Table - see tegra_ivc_channel_notified()
  281. * ===============================================================
  282. *
  283. * local remote action
  284. * ----- ------ -----------------------------------
  285. * SYNC EST <none>
  286. * SYNC ACK reset counters; move to EST; notify
  287. * SYNC SYNC reset counters; move to ACK; notify
  288. * ACK EST move to EST; notify
  289. * ACK ACK move to EST; notify
  290. * ACK SYNC reset counters; move to ACK; notify
  291. * EST EST <none>
  292. * EST ACK <none>
  293. * EST SYNC reset counters; move to ACK; notify
  294. *
  295. * ===============================================================
  296. */
  297. int tegra_ivc_channel_notified(struct tegra_ivc *ivc)
  298. {
  299. ulong offset;
  300. enum ivc_state peer_state;
  301. /* Copy the receiver's state out of shared memory. */
  302. offset = offsetof(struct tegra_ivc_channel_header, w_count);
  303. tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);
  304. peer_state = ACCESS_ONCE(ivc->rx_channel->state);
  305. if (peer_state == ivc_state_sync) {
  306. /*
  307. * Order observation of ivc_state_sync before stores clearing
  308. * tx_channel.
  309. */
  310. mb();
  311. /*
  312. * Reset tx_channel counters. The remote end is in the SYNC
  313. * state and won't make progress until we change our state,
  314. * so the counters are not in use at this time.
  315. */
  316. ivc->tx_channel->w_count = 0;
  317. ivc->rx_channel->r_count = 0;
  318. ivc->w_pos = 0;
  319. ivc->r_pos = 0;
  320. /*
  321. * Ensure that counters appear cleared before new state can be
  322. * observed.
  323. */
  324. mb();
  325. /*
  326. * Move to ACK state. We have just cleared our counters, so it
  327. * is now safe for the remote end to start using these values.
  328. */
  329. ivc->tx_channel->state = ivc_state_ack;
  330. offset = offsetof(struct tegra_ivc_channel_header, w_count);
  331. tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);
  332. /*
  333. * Notify remote end to observe state transition.
  334. */
  335. ivc->notify(ivc);
  336. } else if (ivc->tx_channel->state == ivc_state_sync &&
  337. peer_state == ivc_state_ack) {
  338. /*
  339. * Order observation of ivc_state_sync before stores clearing
  340. * tx_channel.
  341. */
  342. mb();
  343. /*
  344. * Reset tx_channel counters. The remote end is in the ACK
  345. * state and won't make progress until we change our state,
  346. * so the counters are not in use at this time.
  347. */
  348. ivc->tx_channel->w_count = 0;
  349. ivc->rx_channel->r_count = 0;
  350. ivc->w_pos = 0;
  351. ivc->r_pos = 0;
  352. /*
  353. * Ensure that counters appear cleared before new state can be
  354. * observed.
  355. */
  356. mb();
  357. /*
  358. * Move to ESTABLISHED state. We know that the remote end has
  359. * already cleared its counters, so it is safe to start
  360. * writing/reading on this channel.
  361. */
  362. ivc->tx_channel->state = ivc_state_established;
  363. offset = offsetof(struct tegra_ivc_channel_header, w_count);
  364. tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);
  365. /*
  366. * Notify remote end to observe state transition.
  367. */
  368. ivc->notify(ivc);
  369. } else if (ivc->tx_channel->state == ivc_state_ack) {
  370. /*
  371. * At this point, we have observed the peer to be in either
  372. * the ACK or ESTABLISHED state. Next, order observation of
  373. * peer state before storing to tx_channel.
  374. */
  375. mb();
  376. /*
  377. * Move to ESTABLISHED state. We know that we have previously
  378. * cleared our counters, and we know that the remote end has
  379. * cleared its counters, so it is safe to start writing/reading
  380. * on this channel.
  381. */
  382. ivc->tx_channel->state = ivc_state_established;
  383. offset = offsetof(struct tegra_ivc_channel_header, w_count);
  384. tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);
  385. /*
  386. * Notify remote end to observe state transition.
  387. */
  388. ivc->notify(ivc);
  389. } else {
  390. /*
  391. * There is no need to handle any further action. Either the
  392. * channel is already fully established, or we are waiting for
  393. * the remote end to catch up with our current state. Refer
  394. * to the diagram in "IVC State Transition Table" above.
  395. */
  396. }
  397. if (ivc->tx_channel->state != ivc_state_established)
  398. return -EAGAIN;
  399. return 0;
  400. }
  401. void tegra_ivc_channel_reset(struct tegra_ivc *ivc)
  402. {
  403. ulong offset;
  404. ivc->tx_channel->state = ivc_state_sync;
  405. offset = offsetof(struct tegra_ivc_channel_header, w_count);
  406. tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);
  407. ivc->notify(ivc);
  408. }
  409. static int check_ivc_params(ulong qbase1, ulong qbase2, uint32_t nframes,
  410. uint32_t frame_size)
  411. {
  412. int ret = 0;
  413. BUG_ON(offsetof(struct tegra_ivc_channel_header, w_count) &
  414. (TEGRA_IVC_ALIGN - 1));
  415. BUG_ON(offsetof(struct tegra_ivc_channel_header, r_count) &
  416. (TEGRA_IVC_ALIGN - 1));
  417. BUG_ON(sizeof(struct tegra_ivc_channel_header) &
  418. (TEGRA_IVC_ALIGN - 1));
  419. if ((uint64_t)nframes * (uint64_t)frame_size >= 0x100000000) {
  420. pr_err("tegra_ivc: nframes * frame_size overflows\n");
  421. return -EINVAL;
  422. }
  423. /*
  424. * The headers must at least be aligned enough for counters
  425. * to be accessed atomically.
  426. */
  427. if ((qbase1 & (TEGRA_IVC_ALIGN - 1)) ||
  428. (qbase2 & (TEGRA_IVC_ALIGN - 1))) {
  429. pr_err("tegra_ivc: channel start not aligned\n");
  430. return -EINVAL;
  431. }
  432. if (frame_size & (TEGRA_IVC_ALIGN - 1)) {
  433. pr_err("tegra_ivc: frame size not adequately aligned\n");
  434. return -EINVAL;
  435. }
  436. if (qbase1 < qbase2) {
  437. if (qbase1 + frame_size * nframes > qbase2)
  438. ret = -EINVAL;
  439. } else {
  440. if (qbase2 + frame_size * nframes > qbase1)
  441. ret = -EINVAL;
  442. }
  443. if (ret) {
  444. pr_err("tegra_ivc: queue regions overlap\n");
  445. return ret;
  446. }
  447. return 0;
  448. }
  449. int tegra_ivc_init(struct tegra_ivc *ivc, ulong rx_base, ulong tx_base,
  450. uint32_t nframes, uint32_t frame_size,
  451. void (*notify)(struct tegra_ivc *))
  452. {
  453. int ret;
  454. if (!ivc)
  455. return -EINVAL;
  456. ret = check_ivc_params(rx_base, tx_base, nframes, frame_size);
  457. if (ret)
  458. return ret;
  459. ivc->rx_channel = (struct tegra_ivc_channel_header *)rx_base;
  460. ivc->tx_channel = (struct tegra_ivc_channel_header *)tx_base;
  461. ivc->w_pos = 0;
  462. ivc->r_pos = 0;
  463. ivc->nframes = nframes;
  464. ivc->frame_size = frame_size;
  465. ivc->notify = notify;
  466. return 0;
  467. }