// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson<eric@nelint.com>
 *
 */
#include <config.h>
#include <common.h>
#include <malloc.h>
#include <part.h>
#include <linux/ctype.h>
#include <linux/list.h>
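
/*
 * A cache entry covers one contiguous run of blocks on one device.
 * Note that the list_head must stay the first member of the struct:
 * blkcache_fill(), blkcache_invalidate() and blkcache_configure()
 * cast a plain struct list_head * back to a node pointer.
 */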
struct block_cache_node {
	struct list_head lh;
	int iftype;
	int devnum;
	lbaint_t start;
	lbaint_t blkcnt;
	unsigned long blksz;
	char *cache;
};

static LIST_HEAD(block_cache);

static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 2,
	.max_entries = 32
};
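
/*
 * Find a cache entry on this device that fully contains the requested
 * block range, and move it to the head of the list (the MRU position).
 * Returns NULL when the range is not cached.
 */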
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			if (block_cache.next != &node->lh) {
				/* maintain MRU ordering */
				list_del(&node->lh);
				list_add(&node->lh, &block_cache);
			}
			return node;
		}
	return NULL;
}
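
/*
 * Satisfy a read from the cache if possible.  On a hit the blocks are
 * copied into buffer and 1 is returned; on a miss nothing is copied
 * and 0 is returned.  A typical caller (a sketch, not a real driver)
 * tries the cache first and fills it after a device read:
 *
 *	if (!blkcache_read(iftype, devnum, start, blkcnt, blksz, buf)) {
 *		read_from_device(start, blkcnt, buf);	// hypothetical helper
 *		blkcache_fill(iftype, devnum, start, blkcnt, blksz, buf);
 *	}
 */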
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node = cache_find(iftype, devnum, start,
						   blkcnt, blksz);

	if (node) {
		const char *src = node->cache + (start - node->start) * blksz;

		memcpy(buffer, src, blksz * blkcnt);
		debug("hit: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.hits;
		return 1;
	}

	debug("miss: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.misses;
	return 0;
}
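
/*
 * Store a freshly read range of blocks in the cache.  Transfers larger
 * than max_blocks_per_entry are not cached.  When the cache is full,
 * the tail of the list (the LRU entry) is evicted; its buffer is
 * reused if it is big enough for the new data, otherwise it is freed
 * and reallocated.
 */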
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU */
		node = (struct block_cache_node *)block_cache.prev;
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = NULL;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = NULL;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);

	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}
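
/*
 * Discard all cache entries belonging to one device, e.g. after a
 * write has made the cached contents stale.
 */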
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		node = (struct block_cache_node *)entry;
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}
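
/*
 * Change the cache geometry.  If either limit actually changes, all
 * existing entries are dropped, since they may violate the new limits.
 * The hit/miss counters are reset as well.
 */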
void blkcache_configure(unsigned blocks, unsigned entries)
{
	struct block_cache_node *node;

	if ((blocks != _stats.max_blocks_per_entry) ||
	    (entries != _stats.max_entries)) {
		/* invalidate cache */
		while (!list_empty(&block_cache)) {
			node = (struct block_cache_node *)block_cache.next;
			list_del(&node->lh);
			free(node->cache);
			free(node);
		}
		_stats.entries = 0;
	}

	_stats.max_blocks_per_entry = blocks;
	_stats.max_entries = entries;

	_stats.hits = 0;
	_stats.misses = 0;
}
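
/*
 * Copy the current statistics to the caller.  Note that reading the
 * statistics also resets the hit/miss counters.
 */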
void blkcache_stats(struct block_cache_stats *stats)
{
	memcpy(stats, &_stats, sizeof(*stats));
	_stats.hits = 0;
	_stats.misses = 0;
}