mpp_list.cpp

/*
 * Copyright 2015 Rockchip Electronics Co. LTD
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define MODULE_TAG "mpp_list"

#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>

#include "mpp_log.h"
#include "mpp_list.h"
#include "mpp_common.h"

#define LIST_DEBUG(fmt, ...) mpp_log(fmt, ## __VA_ARGS__)
#define LIST_ERROR(fmt, ...) mpp_err(fmt, ## __VA_ARGS__)

RK_U32 mpp_list::keys = 0;
/* list node header; the payload is stored immediately after this struct */
typedef struct mpp_list_node {
    mpp_list_node*  prev;
    mpp_list_node*  next;
    RK_U32          key;
    RK_S32          size;
} mpp_list_node;

static inline void list_node_init(mpp_list_node *node)
{
    node->prev = node->next = node;
}

static inline void list_node_init_with_key_and_size(mpp_list_node *node, RK_U32 key, RK_S32 size)
{
    list_node_init(node);
    node->key  = key;
    node->size = size;
}

static mpp_list_node* create_list(void *data, RK_S32 size, RK_U32 key)
{
    mpp_list_node *node = (mpp_list_node*)malloc(sizeof(mpp_list_node) + size);
    if (node) {
        /* copy the payload into the space right after the node header */
        void *dst = (void*)(node + 1);
        list_node_init_with_key_and_size(node, key, size);
        memcpy(dst, data, size);
    } else {
        LIST_ERROR("failed to allocate list node");
    }
    return node;
}
static inline void _mpp_list_add(mpp_list_node *_new, mpp_list_node *prev, mpp_list_node *next)
{
    next->prev = _new;
    _new->next = next;
    _new->prev = prev;
    prev->next = _new;
}

static inline void mpp_list_add(mpp_list_node *_new, mpp_list_node *head)
{
    _mpp_list_add(_new, head, head->next);
}

static inline void mpp_list_add_tail(mpp_list_node *_new, mpp_list_node *head)
{
    _mpp_list_add(_new, head->prev, head);
}

RK_S32 mpp_list::add_at_head(void *data, RK_S32 size)
{
    RK_S32 ret = -EINVAL;
    if (head) {
        mpp_list_node *node = create_list(data, size, 0);
        if (node) {
            mpp_list_add(node, head);
            count++;
            ret = 0;
        } else {
            ret = -ENOMEM;
        }
    }
    return ret;
}

RK_S32 mpp_list::add_at_tail(void *data, RK_S32 size)
{
    RK_S32 ret = -EINVAL;
    if (head) {
        mpp_list_node *node = create_list(data, size, 0);
        if (node) {
            mpp_list_add_tail(node, head);
            count++;
            ret = 0;
        } else {
            ret = -ENOMEM;
        }
    }
    return ret;
}
static void release_list(mpp_list_node *node, void *data, RK_S32 size)
{
    void *src = (void*)(node + 1);
    if (node->size == size) {
        if (data)
            memcpy(data, src, size);
    } else {
        LIST_ERROR("node size check failed when release_list");
        size = (size < node->size) ? (size) : (node->size);
        if (data)
            memcpy(data, src, size);
    }
    free(node);
}

static inline void _mpp_list_del(mpp_list_node *prev, mpp_list_node *next)
{
    next->prev = prev;
    prev->next = next;
}

static inline void mpp_list_del_init(mpp_list_node *node)
{
    _mpp_list_del(node->prev, node->next);
    list_node_init(node);
}

static inline void _list_del_node_no_lock(mpp_list_node *node, void *data, RK_S32 size)
{
    mpp_list_del_init(node);
    release_list(node, data, size);
}

RK_S32 mpp_list::del_at_head(void *data, RK_S32 size)
{
    RK_S32 ret = -EINVAL;
    if (head && count) {
        _list_del_node_no_lock(head->next, data, size);
        count--;
        ret = 0;
    }
    return ret;
}

RK_S32 mpp_list::del_at_tail(void *data, RK_S32 size)
{
    RK_S32 ret = -EINVAL;
    if (head && count) {
        _list_del_node_no_lock(head->prev, data, size);
        count--;
        ret = 0;
    }
    return ret;
}
static mpp_list_node* create_list_with_size(void *data, RK_S32 size, RK_U32 key)
{
    mpp_list_node *node = (mpp_list_node*)malloc(sizeof(mpp_list_node) +
                                                 sizeof(size) + size);
    if (node) {
        RK_S32 *dst = (RK_S32 *)(node + 1);
        list_node_init_with_key_and_size(node, key, size);
        /* store the payload size in front of the payload itself */
        *dst++ = size;
        memcpy(dst, data, size);
    } else {
        LIST_ERROR("failed to allocate list node");
    }
    return node;
}

RK_S32 mpp_list::fifo_wr(void *data, RK_S32 size)
{
    RK_S32 ret = -EINVAL;
    if (head) {
        mpp_list_node *node = create_list_with_size(data, size, 0);
        if (node) {
            mpp_list_add_tail(node, head);
            count++;
            ret = 0;
        } else {
            ret = -ENOMEM;
        }
    }
    return ret;
}

static void release_list_with_size(mpp_list_node *node, void *data, RK_S32 *size)
{
    RK_S32 *src = (RK_S32*)(node + 1);
    RK_S32 data_size = *src++;

    *size = data_size;
    if (data)
        memcpy(data, src, data_size);
    free(node);
}

RK_S32 mpp_list::fifo_rd(void *data, RK_S32 *size)
{
    RK_S32 ret = -EINVAL;
    if (head && count) {
        mpp_list_node *node = head->next;
        mpp_list_del_init(node);
        release_list_with_size(node, data, size);
        count--;
        ret = 0;
    }
    return ret;
}
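
/*
 * Illustrative sketch (not part of the original file): how the variable-size
 * fifo_wr() / fifo_rd() pair above is meant to be used. The payload type and
 * values are assumptions made purely for demonstration.
 */
#if 0
static void fifo_usage_sketch(void)
{
    mpp_list fifo(NULL);                /* no per-node destructor needed */
    RK_S32 in  = 42;
    RK_S32 out = 0;
    RK_S32 out_size = 0;

    fifo.fifo_wr(&in, sizeof(in));      /* node records its own payload size */
    fifo.fifo_rd(&out, &out_size);      /* out == 42, out_size == sizeof(in) */
}
#endif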
RK_S32 mpp_list::list_is_empty()
{
    RK_S32 ret = (count == 0);
    return ret;
}

RK_S32 mpp_list::list_size()
{
    RK_S32 ret = count;
    return ret;
}

RK_S32 mpp_list::add_by_key(void *data, RK_S32 size, RK_U32 *key)
{
    RK_S32 ret = 0;
    if (head) {
        RK_U32 list_key = get_key();
        *key = list_key;
        mpp_list_node *node = create_list(data, size, list_key);
        if (node) {
            mpp_list_add_tail(node, head);
            count++;
            ret = 0;
        } else {
            ret = -ENOMEM;
        }
    }
    return ret;
}

RK_S32 mpp_list::del_by_key(void *data, RK_S32 size, RK_U32 key)
{
    RK_S32 ret = 0;
    if (head && count) {
        struct mpp_list_node *tmp = head->next;
        ret = -EINVAL;
        /* walk every node and advance the cursor; without the advance the
         * loop never terminates when the key is not on the first node */
        while (tmp != head) {
            if (tmp->key == key) {
                _list_del_node_no_lock(tmp, data, size);
                count--;
                ret = 0;
                break;
            }
            tmp = tmp->next;
        }
    }
    return ret;
}
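
/*
 * Illustrative sketch (not part of the original file): the keyed interface.
 * add_by_key() hands back the generated key, which del_by_key() later uses
 * to remove that specific node. The payload here is an assumption made for
 * demonstration only.
 */
#if 0
static void keyed_usage_sketch(void)
{
    mpp_list list(NULL);
    RK_S32 payload = 1;
    RK_U32 key = 0;

    list.add_by_key(&payload, sizeof(payload), &key);   /* key is filled in */

    RK_S32 out = 0;
    list.del_by_key(&out, sizeof(out), key);            /* removes that node */
}
#endif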
RK_S32 mpp_list::show_by_key(void *data, RK_U32 key)
{
    RK_S32 ret = -EINVAL;
    (void)data;
    (void)key;
    return ret;
}

RK_S32 mpp_list::flush()
{
    if (head) {
        while (count) {
            mpp_list_node* node = head->next;
            mpp_list_del_init(node);
            if (destroy) {
                destroy((void*)(node + 1));
            }
            free(node);
            count--;
        }
    }
    signal();
    return 0;
}
MPP_RET mpp_list::wait_lt(RK_S64 timeout, RK_S32 val)
{
    if (list_size() < val)
        return MPP_OK;

    if (!timeout)
        return MPP_NOK;

    if (timeout < 0)
        wait();
    else
        wait(timeout);

    return list_size() < val ? MPP_OK : MPP_NOK;
}

MPP_RET mpp_list::wait_le(RK_S64 timeout, RK_S32 val)
{
    if (list_size() <= val)
        return MPP_OK;

    if (!timeout)
        return MPP_NOK;

    if (timeout < 0)
        wait();
    else
        wait(timeout);

    return list_size() <= val ? MPP_OK : MPP_NOK;
}

MPP_RET mpp_list::wait_gt(RK_S64 timeout, RK_S32 val)
{
    if (list_size() > val)
        return MPP_OK;

    if (!timeout)
        return MPP_NOK;

    if (timeout < 0)
        wait();
    else
        wait(timeout);

    return list_size() > val ? MPP_OK : MPP_NOK;
}

MPP_RET mpp_list::wait_ge(RK_S64 timeout, RK_S32 val)
{
    if (list_size() >= val)
        return MPP_OK;

    if (!timeout)
        return MPP_NOK;

    if (timeout < 0)
        wait();
    else
        wait(timeout);

    return list_size() >= val ? MPP_OK : MPP_NOK;
}

RK_U32 mpp_list::get_key()
{
    return keys++;
}

mpp_list::mpp_list(node_destructor func)
    : destroy(NULL),
      head(NULL),
      count(0)
{
    destroy = func;
    head = (mpp_list_node*)malloc(sizeof(mpp_list_node));
    if (NULL == head) {
        LIST_ERROR("failed to allocate list header");
    } else {
        list_node_init_with_key_and_size(head, 0, 0);
    }
}

mpp_list::~mpp_list()
{
    flush();
    if (head) free(head);
    head = NULL;
    destroy = NULL;
}
/* list sort ported from kernel list_sort.c */

/*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
static struct list_head *merge(void *priv, list_cmp_func_t cmp,
                               struct list_head *a, struct list_head *b)
{
    struct list_head *head, **tail = &head;

    for (;;) {
        /* if equal, take 'a' -- important for sort stability */
        if (cmp(priv, a, b) <= 0) {
            *tail = a;
            tail = &a->next;
            a = a->next;
            if (!a) {
                *tail = b;
                break;
            }
        } else {
            *tail = b;
            tail = &b->next;
            b = b->next;
            if (!b) {
                *tail = a;
                break;
            }
        }
    }
    return head;
}
/*
 * Combine final list merge with restoration of standard doubly-linked
 * list structure. This approach duplicates code from merge(), but
 * runs faster than the tidier alternatives of either a separate final
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
                        struct list_head *a, struct list_head *b)
{
    struct list_head *tail = head;
    RK_U8 count = 0;

    for (;;) {
        /* if equal, take 'a' -- important for sort stability */
        if (cmp(priv, a, b) <= 0) {
            tail->next = a;
            a->prev = tail;
            tail = a;
            a = a->next;
            if (!a)
                break;
        } else {
            tail->next = b;
            b->prev = tail;
            tail = b;
            b = b->next;
            if (!b) {
                b = a;
                break;
            }
        }
    }

    /* Finish linking remainder of list b on to tail */
    tail->next = b;
    do {
        /*
         * If the merge is highly unbalanced (e.g. the input is
         * already sorted), this loop may run many iterations.
         * Continue callbacks to the client even though no
         * element comparison is needed, so the client's cmp()
         * routine can invoke cond_resched() periodically.
         */
        if (!++count)
            cmp(priv, b, b);
        b->prev = tail;
        tail = b;
        b = b->next;
    } while (b);

    /* And the final links to make a circular doubly-linked list */
    tail->next = head;
    head->prev = tail;
}
void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp)
{
    struct list_head *list = head->next, *pending = NULL;
    size_t count = 0;                   /* Count of pending */

    if (list == head->prev)             /* Zero or one elements */
        return;

    /* Convert to a null-terminated singly-linked list. */
    head->prev->next = NULL;

    /*
     * Data structure invariants:
     * - All lists are singly linked and null-terminated; prev
     *   pointers are not maintained.
     * - pending is a prev-linked "list of lists" of sorted
     *   sublists awaiting further merging.
     * - Each of the sorted sublists is power-of-two in size.
     * - Sublists are sorted by size and age, smallest & newest at front.
     * - There are zero to two sublists of each size.
     * - A pair of pending sublists are merged as soon as the number
     *   of following pending elements equals their size (i.e.
     *   each time count reaches an odd multiple of that size).
     *   That ensures each later final merge will be at worst 2:1.
     * - Each round consists of:
     *   - Merging the two sublists selected by the highest bit
     *     which flips when count is incremented, and
     *   - Adding an element from the input as a size-1 sublist.
     */
    do {
        size_t bits;
        struct list_head **tail = &pending;

        /* Find the least-significant clear bit in count */
        for (bits = count; bits & 1; bits >>= 1)
            tail = &(*tail)->prev;
        /* Do the indicated merge */
        if (bits) {
            struct list_head *a = *tail, *b = a->prev;

            a = merge(priv, cmp, b, a);
            /* Install the merged result in place of the inputs */
            a->prev = b->prev;
            *tail = a;
        }

        /* Move one element from input list to pending */
        list->prev = pending;
        pending = list;
        list = list->next;
        pending->next = NULL;
        count++;
    } while (list);

    /* End of input; merge together all the pending lists. */
    list = pending;
    pending = pending->prev;
    for (;;) {
        struct list_head *next = pending->prev;

        if (!next)
            break;
        list = merge(priv, cmp, pending, list);
        pending = next;
    }
    /* The final merge, rebuilding prev links */
    merge_final(priv, cmp, head, pending, list);
}
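
/*
 * Illustrative sketch (not part of the original file): sorting a kernel-style
 * list_head list with list_sort(). The item struct, the offsetof()-based
 * entry recovery and the comparator prototype shown here are assumptions made
 * for demonstration; in real code the comparator must match the
 * list_cmp_func_t typedef from mpp_list.h.
 */
#if 0
struct sort_item {
    struct list_head link;              /* embedded list node */
    RK_S32 value;                       /* sort key */
};

static int cmp_by_value(void *priv, const struct list_head *a,
                        const struct list_head *b)
{
    /* recover the enclosing sort_item from its embedded list_head */
    const struct sort_item *ia = (const struct sort_item *)
        ((const char *)a - offsetof(struct sort_item, link));
    const struct sort_item *ib = (const struct sort_item *)
        ((const char *)b - offsetof(struct sort_item, link));

    (void)priv;
    return ia->value - ib->value;       /* ascending order */
}

/* with 'head' the sentinel of a circular doubly-linked list of sort_item:
 *     list_sort(NULL, &head, cmp_by_value);
 * leaves the nodes linked in ascending 'value' order. */
#endif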
#if BUILD_RK_LIST_TEST
#include "vpu_mem.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#define LOOP_RK_LIST 600
#define COUNT_ADD 100
#define COUNT_DEL 99

volatile int err = 0;

static int mpp_list_fifo_test(mpp_list *list_0)
{
    int count;
    VPUMemLinear_t m;
    for (count = 0; count < COUNT_ADD; count++) {
        err |= VPUMallocLinear(&m, 100);
        if (err) {
            printf("VPUMallocLinear in mpp_list_fifo_test\n");
            break;
        }
        err |= list_0->add_at_head(&m, sizeof(m));
        if (err) {
            printf("add_at_head in mpp_list_fifo_test\n");
            break;
        }
    }
    if (!err) {
        for (count = 0; count < COUNT_DEL; count++) {
            err |= list_0->del_at_tail(&m, sizeof(m));
            if (err) {
                printf("del_at_tail in mpp_list_fifo_test\n");
                break;
            }
            err |= VPUFreeLinear(&m);
            if (err) {
                printf("VPUFreeLinear in mpp_list_fifo_test\n");
                break;
            }
        }
    }
    return err;
}
static int mpp_list_filo_test(mpp_list *list_0)
{
    int count;
    VPUMemLinear_t m;
    for (count = 0; count < COUNT_ADD + COUNT_DEL; count++) {
        if (count & 1) {
            err |= list_0->del_at_head(&m, sizeof(m));
            if (err) {
                printf("del_at_head in mpp_list_filo_test\n");
                break;
            }
            err |= VPUFreeLinear(&m);
            if (err) {
                printf("VPUFreeLinear in mpp_list_filo_test\n");
                break;
            }
        } else {
            err |= VPUMallocLinear(&m, 100);
            if (err) {
                printf("VPUMallocLinear in mpp_list_filo_test\n");
                break;
            }
            err |= list_0->add_at_head(&m, sizeof(m));
            if (err) {
                printf("add_at_head in mpp_list_filo_test\n");
                break;
            }
        }
    }
    return err;
}
void *mpp_list_test_loop_0(void *pdata)
{
    int i;
    mpp_list *list_0 = (mpp_list *)pdata;

    printf("mpp_list test 0 loop start\n");
    for (i = 0; i < LOOP_RK_LIST; i++) {
        err |= mpp_list_filo_test(list_0);
        if (err) break;
    }
    if (err) {
        printf("thread: found vpu mem operation err %d\n", err);
    } else {
        printf("thread: test done and found no err\n");
    }
    return NULL;
}

int mpp_list_test_0()
{
    int i, err = 0;

    printf("mpp_list test 0 FIFO start\n");
    mpp_list *list_0 = new mpp_list((node_destructor)VPUFreeLinear);

    pthread_t mThread;
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
    pthread_create(&mThread, &attr, mpp_list_test_loop_0, (void*)list_0);
    pthread_attr_destroy(&attr);

    for (i = 0; i < LOOP_RK_LIST; i++) {
        err |= mpp_list_fifo_test(list_0);
        if (err) break;
    }
    if (err) {
        printf("main : found mpp_list operation err %d\n", err);
    } else {
        printf("main : test done and found no err\n");
    }

    void *dummy;
    pthread_join(mThread, &dummy);
    printf("mpp_list test 0 end size %d\n", list_0->list_size());

    delete list_0;
    return err;
}
#define TOTAL_RK_LIST_TEST_COUNT 1

typedef int (*RK_LIST_TEST_FUNC)(void);
RK_LIST_TEST_FUNC test_func[TOTAL_RK_LIST_TEST_COUNT] = {
    mpp_list_test_0,
};

int main(int argc, char *argv[])
{
    int i, start = 0, end = 0;

    if (argc < 2) {
        end = TOTAL_RK_LIST_TEST_COUNT;
    } else if (argc == 2) {
        start = atoi(argv[1]);
        end = start + 1;
    } else if (argc == 3) {
        start = atoi(argv[1]);
        end = atoi(argv[2]);
    } else {
        printf("too many argc %d\n", argc);
        return -1;
    }
    if (start < 0 || start > TOTAL_RK_LIST_TEST_COUNT ||
        end < 0 || end > TOTAL_RK_LIST_TEST_COUNT) {
        printf("invalid input: start %d end %d\n", start, end);
        return -1;
    }
    for (i = start; i < end; i++) {
        int err = test_func[i]();
        if (err) {
            printf("test case %d return err %d\n", i, err);
            break;
        }
    }
    return 0;
}
#endif