/*
 * Copyright 2010 Rockchip Electronics S.LSI Co. LTD
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define MODULE_TAG "mpp_drm"

#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/drm.h>
#include <linux/drm_mode.h>

#include "os_mem.h"
#include "allocator_drm.h"

#include "mpp_env.h"
#include "mpp_mem.h"
#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_runtime.h"
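
/*
 * Debug trace control: category bits are selected at runtime through the
 * "drm_debug" environment variable (read in os_allocator_drm_open).
 */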
static RK_U32 drm_debug = 0;

#define DRM_FUNCTION                (0x00000001)
#define DRM_DEVICE                  (0x00000002)
#define DRM_IOCTL                   (0x00000004)

#define drm_dbg(flag, fmt, ...)     _mpp_dbg_f(drm_debug, flag, fmt, ## __VA_ARGS__)
#define drm_dbg_func(fmt, ...)      drm_dbg(DRM_FUNCTION, fmt, ## __VA_ARGS__)
#define drm_dbg_dev(fmt, ...)       drm_dbg(DRM_DEVICE, fmt, ## __VA_ARGS__)
#define drm_dbg_ioctl(fmt, ...)     drm_dbg(DRM_IOCTL, fmt, ## __VA_ARGS__)

/* memory type definitions. */
enum drm_rockchip_gem_mem_type {
    /* Physically contiguous memory. */
    ROCKCHIP_BO_CONTIG      = 1 << 0,
    /* cacheable mapping. */
    ROCKCHIP_BO_CACHABLE    = 1 << 1,
    /* write-combine mapping. */
    ROCKCHIP_BO_WC          = 1 << 2,
    ROCKCHIP_BO_SECURE      = 1 << 3,
    /* keep kmap for cma buffer or alloc kmap for other type memory */
    ROCKCHIP_BO_ALLOC_KMAP  = 1 << 4,
    /* alloc page with gfp_dma32 */
    ROCKCHIP_BO_DMA32       = 1 << 5,
    ROCKCHIP_BO_MASK        = ROCKCHIP_BO_CONTIG | ROCKCHIP_BO_CACHABLE |
                              ROCKCHIP_BO_WC | ROCKCHIP_BO_SECURE |
                              ROCKCHIP_BO_ALLOC_KMAP | ROCKCHIP_BO_DMA32,
};
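
/*
 * Per-instance allocator state: requested alignment, the opened DRM device fd
 * and the MppAllocFlagType flags passed at open time.
 */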
typedef struct {
    RK_U32  alignment;
    RK_S32  drm_device;
    RK_U32  flags;
} allocator_ctx_drm;

/* use renderD128 first to avoid GKI kernel permission issue */
static const char *dev_drm[] = {
    "/dev/dri/renderD128",
    "/dev/dri/card0",
};
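
/* Translate MPP allocation flags into the Rockchip GEM flag bits defined above. */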
static RK_U32 to_rockchip_gem_mem_flag(RK_U32 flags)
{
    RK_U32 ret = 0;

    if (flags & MPP_ALLOC_FLAG_DMA32)
        ret |= ROCKCHIP_BO_DMA32;

    if (flags & MPP_ALLOC_FLAG_CACHABLE)
        ret |= ROCKCHIP_BO_CACHABLE;

    if (flags & MPP_ALLOC_FLAG_CMA)
        ret |= ROCKCHIP_BO_CONTIG;

    return ret;
}
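
/* ioctl wrapper that retries while the call is interrupted (EINTR) or temporarily unavailable (EAGAIN). */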
static int drm_ioctl(int fd, int req, void *arg)
{
    int ret;

    do {
        ret = ioctl(fd, req, arg);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    drm_dbg_ioctl("%x ret %d: %s\n", req, ret, strerror(errno));

    return ret;
}
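
/* Export a GEM handle as a dma-buf file descriptor via DRM_IOCTL_PRIME_HANDLE_TO_FD and return it through map_fd. */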
static int drm_handle_to_fd(int fd, RK_U32 handle, int *map_fd, RK_U32 flags)
{
    int ret;
    struct drm_prime_handle dph;

    memset(&dph, 0, sizeof(struct drm_prime_handle));
    dph.handle = handle;
    dph.fd = -1;
    dph.flags = flags;

    if (map_fd == NULL)
        return -EINVAL;

    ret = drm_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &dph);
    if (ret < 0)
        return ret;

    *map_fd = dph.fd;

    drm_dbg_func("dev %d handle %d flags %x get fd %d\n", fd, handle, dph.flags, *map_fd);

    if (*map_fd < 0) {
        mpp_err_f("map ioctl returned negative fd\n");
        return -EINVAL;
    }

    return ret;
}
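
/*
 * Allocate a dumb buffer of at least len bytes: the length is rounded up to
 * align and requested as a width x 1 buffer with 8 bits per pixel. The
 * resulting GEM handle is returned through handle.
 */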
static int drm_alloc(int fd, size_t len, size_t align, RK_U32 *handle, RK_U32 flags)
{
    int ret;
    struct drm_mode_create_dumb dmcb;

    memset(&dmcb, 0, sizeof(struct drm_mode_create_dumb));
    dmcb.bpp = 8;
    dmcb.width = (len + align - 1) & (~(align - 1));
    dmcb.height = 1;
    dmcb.flags = to_rockchip_gem_mem_flag(flags);

    if (handle == NULL)
        return -EINVAL;

    ret = drm_ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &dmcb);
    if (ret < 0)
        return ret;

    *handle = dmcb.handle;

    drm_dbg_func("dev %d alloc aligned %d flags %x handle %d size %lld\n", fd,
                 align, dmcb.flags, dmcb.handle, dmcb.size);

    return ret;
}
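
/* Destroy the dumb buffer referenced by a GEM handle. */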
static int drm_free(int fd, RK_U32 handle)
{
    struct drm_mode_destroy_dumb data = {
        .handle = handle,
    };

    return drm_ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &data);
}
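
/*
 * Open the first accessible DRM node (renderD128, then card0), drop DRM
 * master rights and set up the allocator context.
 */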
static MPP_RET os_allocator_drm_open(void **ctx, size_t alignment, MppAllocFlagType flags)
{
    allocator_ctx_drm *p;
    RK_S32 fd;
    RK_S32 i;

    if (NULL == ctx) {
        mpp_err_f("does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    *ctx = NULL;

    mpp_env_get_u32("drm_debug", &drm_debug, 0);

    for (i = 0; i < (RK_S32)MPP_ARRAY_ELEMS(dev_drm); i++) {
        fd = open(dev_drm[i], O_RDWR | O_CLOEXEC);
        if (fd > 0)
            break;
    }

    if (fd < 0) {
        mpp_err_f("open all drm device failed.\n");
        mpp_err("Please check the following device path and access permission:\n");
        for (i = 0; i < (RK_S32)MPP_ARRAY_ELEMS(dev_drm); i++)
            mpp_err("%s\n", dev_drm[i]);
        return MPP_ERR_UNKNOW;
    } else {
        /* drop master by default to avoid becoming the drm master */
        drm_ioctl(fd, DRM_IOCTL_DROP_MASTER, 0);
    }

    drm_dbg_dev("open drm dev fd %d flags %x\n", fd, flags);

    p = mpp_malloc(allocator_ctx_drm, 1);
    if (NULL == p) {
        close(fd);
        mpp_err_f("failed to allocate context\n");
        return MPP_ERR_MALLOC;
    } else {
        /*
         * default drm use cma, do nothing here
         */
        p->alignment  = alignment;
        p->flags      = flags;
        p->drm_device = fd;
        *ctx = p;
    }

    return MPP_OK;
}
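
/*
 * Allocate a buffer: create a dumb buffer, export it as a dma-buf fd, then
 * destroy the local GEM handle so the exported fd is the only reference kept
 * in MppBufferInfo.
 */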
static MPP_RET os_allocator_drm_alloc(void *ctx, MppBufferInfo *info)
{
    MPP_RET ret = MPP_OK;
    allocator_ctx_drm *p = NULL;

    if (NULL == ctx) {
        mpp_err_f("does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    p = (allocator_ctx_drm *)ctx;
    drm_dbg_func("dev %d alloc alignment %d size %d\n", p->drm_device,
                 p->alignment, info->size);

    ret = drm_alloc(p->drm_device, info->size, p->alignment,
                    (RK_U32 *)&info->hnd, p->flags);
    if (ret) {
        mpp_err_f("drm_alloc failed ret %d\n", ret);
        return ret;
    }

    ret = drm_handle_to_fd(p->drm_device, (RK_U32)((intptr_t)info->hnd),
                           &info->fd, DRM_CLOEXEC | DRM_RDWR);
    if (ret) {
        mpp_err_f("handle_to_fd failed ret %d\n", ret);
        return ret;
    }

    drm_dbg_func("dev %d get handle %d with fd %d\n", p->drm_device,
                 (RK_U32)((intptr_t)info->hnd), info->fd);

    /* release handle to reduce iova usage */
    drm_free(p->drm_device, (RK_U32)((intptr_t)info->hnd));
    info->hnd = NULL;
    info->ptr = NULL;

    return ret;
}
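
/* Import an external dma-buf fd by duplicating it, so the caller keeps ownership of the original descriptor. */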
static MPP_RET os_allocator_drm_import(void *ctx, MppBufferInfo *data)
{
    allocator_ctx_drm *p = (allocator_ctx_drm *)ctx;
    RK_S32 fd_ext = data->fd;
    MPP_RET ret = MPP_OK;

    drm_dbg_func("enter dev %d\n", p->drm_device);

    mpp_assert(fd_ext > 0);

    data->fd = mpp_dup(fd_ext);
    data->ptr = NULL;

    if (data->fd <= 0) {
        mpp_err_f("fd dup returned invalid fd %d\n", data->fd);
        ret = MPP_NOK;
    }

    drm_dbg_func("leave dev %d\n", p->drm_device);

    return ret;
}
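
/* Release a buffer: unmap the CPU mapping if present and close the dma-buf fd. */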
static MPP_RET os_allocator_drm_free(void *ctx, MppBufferInfo *data)
{
    allocator_ctx_drm *p = NULL;

    if (NULL == ctx) {
        mpp_err_f("does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    p = (allocator_ctx_drm *)ctx;

    drm_dbg_func("dev %d handle %p unmap %p fd %d size %d\n", p->drm_device,
                 data->hnd, data->ptr, data->fd, data->size);

    if (data->ptr) {
        munmap(data->ptr, data->size);
        data->ptr = NULL;
    }

    if (data->fd > 0) {
        close(data->fd);
        data->fd = -1;
    } else {
        mpp_err_f("can not close invalid fd %d\n", data->fd);
    }

    return MPP_OK;
}
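
/* Close the DRM device fd and free the allocator context. */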
static MPP_RET os_allocator_drm_close(void *ctx)
{
    int ret;
    allocator_ctx_drm *p;

    if (NULL == ctx) {
        mpp_err("os_allocator_close doesn't accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    p = (allocator_ctx_drm *)ctx;
    drm_dbg_func("dev %d\n", p->drm_device);

    ret = close(p->drm_device);
    mpp_free(p);
    if (ret < 0)
        return (MPP_RET)(-errno);

    return MPP_OK;
}
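
/*
 * Map the dma-buf into user space on first use. PROT_WRITE is requested only
 * when the buffer fd was opened read-write.
 */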
static MPP_RET os_allocator_drm_mmap(void *ctx, MppBufferInfo *data)
{
    allocator_ctx_drm *p;
    MPP_RET ret = MPP_OK;

    if (NULL == ctx) {
        mpp_err("os_allocator_drm_mmap does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    p = (allocator_ctx_drm *)ctx;

    if (NULL == data->ptr) {
        int flags = PROT_READ;

        if (fcntl(data->fd, F_GETFL) & O_RDWR)
            flags |= PROT_WRITE;

        data->ptr = mmap(NULL, data->size, flags, MAP_SHARED, data->fd, 0);
        if (data->ptr == MAP_FAILED) {
            mpp_err("mmap failed: %s\n", strerror(errno));
            data->ptr = NULL;
            return -errno;
        }

        drm_dbg_func("dev %d mmap fd %d to %p (%s)\n", p->drm_device,
                     data->fd, data->ptr,
                     flags & PROT_WRITE ? "RDWR" : "RDONLY");
    }

    return ret;
}
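
/* Report the allocation flags this allocator was opened with. */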
static MppAllocFlagType os_allocator_drm_flags(void *ctx)
{
    allocator_ctx_drm *p = (allocator_ctx_drm *)ctx;

    return p ? (MppAllocFlagType)p->flags : MPP_ALLOC_FLAG_NONE;
}
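
/* Allocator operations registered for MPP_BUFFER_TYPE_DRM; release shares the free callback. */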
os_allocator allocator_drm = {
    .type = MPP_BUFFER_TYPE_DRM,
    .open = os_allocator_drm_open,
    .close = os_allocator_drm_close,
    .alloc = os_allocator_drm_alloc,
    .free = os_allocator_drm_free,
    .import = os_allocator_drm_import,
    .release = os_allocator_drm_free,
    .mmap = os_allocator_drm_mmap,
    .flags = os_allocator_drm_flags,
};