mpp_dmabuf.cpp 3.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141
  1. /* SPDX-License-Identifier: Apache-2.0 OR MIT */
  2. /*
  3. * Copyright (c) 2023 Rockchip Electronics Co., Ltd.
  4. */
  5. #include <string.h>
  6. #include <errno.h>
  7. #include <sys/ioctl.h>
  8. #include "mpp_env.h"
  9. #include "mpp_log.h"
  10. #include "mpp_common.h"
  11. #include "mpp_dmabuf.h"
  12. #include "linux/dma-buf.h"
  13. #define MPP_NO_PARTIAL_SUPPORT 25 /* ENOTTY */
  14. #define CACHE_LINE_SIZE 64
/*
 * Runtime switch for the partial (byte-range) dma-buf cache sync ioctl.
 * Defaults to 0 (disabled -> full-buffer sync path is used); may be enabled
 * via the environment below, and is cleared again at runtime if the kernel
 * rejects DMA_BUF_IOCTL_SYNC_PARTIAL with ENOTTY.
 */
static RK_U32 has_partial_ops = 0;

/* Library constructor: runs at load time, before any dmabuf helper is called. */
__attribute__ ((constructor))
void mpp_dmabuf_init(void)
{
    /*
     * update has_partial_ops by env
     * NOTE: When dmaheap is enabled the dmaheap fd partial ops is fine.
     * But the drm fd partial ops may have error when kernel version above 4.19
     * So we provide the mpp_dmabuf_has_partial_ops env to disable partial ops.
     */
    mpp_env_get_u32("mpp_dmabuf_has_partial_ops", &has_partial_ops, has_partial_ops);
}
  27. MPP_RET mpp_dmabuf_sync_begin(RK_S32 fd, RK_S32 ro, const char *caller)
  28. {
  29. struct dma_buf_sync sync;
  30. RK_S32 ret;
  31. sync.flags = DMA_BUF_SYNC_START | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);
  32. ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
  33. if (ret) {
  34. mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
  35. return MPP_NOK;
  36. }
  37. return MPP_OK;
  38. }
  39. MPP_RET mpp_dmabuf_sync_end(RK_S32 fd, RK_S32 ro, const char *caller)
  40. {
  41. struct dma_buf_sync sync;
  42. RK_S32 ret;
  43. sync.flags = DMA_BUF_SYNC_END | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);
  44. ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
  45. if (ret) {
  46. mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
  47. return MPP_NOK;
  48. }
  49. return MPP_OK;
  50. }
  51. MPP_RET mpp_dmabuf_sync_partial_begin(RK_S32 fd, RK_S32 ro, RK_U32 offset, RK_U32 length, const char *caller)
  52. {
  53. if (has_partial_ops) {
  54. struct dma_buf_sync_partial sync;
  55. RK_S32 ret;
  56. if (!length)
  57. return MPP_OK;
  58. sync.flags = DMA_BUF_SYNC_START | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);
  59. sync.offset = MPP_ALIGN_DOWN(offset, CACHE_LINE_SIZE);
  60. sync.len = MPP_ALIGN(length + offset - sync.offset, CACHE_LINE_SIZE);
  61. ret = ioctl(fd, DMA_BUF_IOCTL_SYNC_PARTIAL, &sync);
  62. if (ret) {
  63. if (errno == MPP_NO_PARTIAL_SUPPORT) {
  64. has_partial_ops = 0;
  65. goto NOT_SUPPORT;
  66. }
  67. mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
  68. return MPP_NOK;
  69. }
  70. return MPP_OK;
  71. }
  72. NOT_SUPPORT:
  73. return mpp_dmabuf_sync_begin(fd, ro, caller);
  74. }
  75. MPP_RET mpp_dmabuf_sync_partial_end(RK_S32 fd, RK_S32 ro, RK_U32 offset, RK_U32 length, const char *caller)
  76. {
  77. if (has_partial_ops) {
  78. struct dma_buf_sync_partial sync;
  79. RK_S32 ret;
  80. if (!length)
  81. return MPP_OK;
  82. sync.flags = DMA_BUF_SYNC_END | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);
  83. sync.offset = MPP_ALIGN_DOWN(offset, CACHE_LINE_SIZE);
  84. sync.len = MPP_ALIGN(length + offset - sync.offset, CACHE_LINE_SIZE);
  85. ret = ioctl(fd, DMA_BUF_IOCTL_SYNC_PARTIAL, &sync);
  86. if (ret) {
  87. if (errno == MPP_NO_PARTIAL_SUPPORT) {
  88. has_partial_ops = 0;
  89. goto NOT_SUPPORT;
  90. }
  91. mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
  92. return MPP_NOK;
  93. }
  94. return MPP_OK;
  95. }
  96. NOT_SUPPORT:
  97. return mpp_dmabuf_sync_end(fd, ro, caller);
  98. }
  99. MPP_RET mpp_dmabuf_set_name(RK_S32 fd, const char *name, const char *caller)
  100. {
  101. RK_S32 ret = ioctl(fd, DMA_BUF_SET_NAME, name);
  102. if (ret) {
  103. mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
  104. return MPP_NOK;
  105. }
  106. return MPP_OK;
  107. }
/*
 * Report whether partial dma-buf sync is currently enabled. Reflects the
 * env-configured flag, which is also cleared at runtime once the kernel
 * rejects the partial sync ioctl.
 */
RK_U32 mpp_dmabuf_sync_partial_support(void)
{
    return has_partial_ops;
}