feat[mpp_dmabuf]: Add dmabuf sync operation
sync_begin - cache invalidate, should be called before cpu read
sync_end   - cache flush, should be called after cpu write

MppBuffer sync flow:
1. hw access
2. sync_begin
3. cpu access (read / write)
4. sync_end
5. hw access

NOTE: readonly option is faster for read only buffer.

Signed-off-by: xueman.ruan <xueman.ruan@rock-chips.com>
Signed-off-by: Herman Chen <herman.chen@rock-chips.com>
Change-Id: I253a6139e9bb30808c07075d64f17b5cfad8519a
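For context, a minimal usage sketch of the CPU-write path described above, assuming `buffer` is a valid MppBuffer allocated from a cacheable group (error handling omitted):

#include <string.h>
#include "mpp_buffer.h"

/* sketch only: fill a hardware buffer from the CPU, then hand it back to hw */
static void fill_buffer_for_hw(MppBuffer buffer)
{
    void  *ptr  = mpp_buffer_get_ptr(buffer);
    size_t size = mpp_buffer_get_size(buffer);

    /* cache invalidate before cpu touches the buffer */
    mpp_buffer_sync_begin(buffer);

    memset(ptr, 0, size);           /* cpu access (write) */

    /* cache flush so the hardware sees the new data */
    mpp_buffer_sync_end(buffer);

    /* hw access can start here */
}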
@@ -265,11 +265,29 @@ typedef struct MppBufferInfo_t {
 #define mpp_buffer_set_offset(buffer, offset) \
     mpp_buffer_set_offset_with_caller(buffer, offset, __FUNCTION__)
 
+#define mpp_buffer_sync_begin(buffer) \
+    mpp_buffer_sync_begin_f(buffer, 0, __FUNCTION__)
+#define mpp_buffer_sync_end(buffer) \
+    mpp_buffer_sync_end_f(buffer, 0, __FUNCTION__)
+#define mpp_buffer_sync_partial_begin(buffer, offset, length) \
+    mpp_buffer_sync_partial_begin_f(buffer, 0, offset, length, __FUNCTION__)
+#define mpp_buffer_sync_partial_end(buffer, offset, length) \
+    mpp_buffer_sync_partial_end_f(buffer, 0, offset, length, __FUNCTION__)
+
+#define mpp_buffer_sync_ro_begin(buffer) \
+    mpp_buffer_sync_begin_f(buffer, 1, __FUNCTION__)
+#define mpp_buffer_sync_ro_end(buffer) \
+    mpp_buffer_sync_end_f(buffer, 1, __FUNCTION__)
+#define mpp_buffer_sync_ro_partial_begin(buffer, offset, length) \
+    mpp_buffer_sync_partial_begin_f(buffer, 1, offset, length, __FUNCTION__)
+#define mpp_buffer_sync_ro_partial_end(buffer, offset, length) \
+    mpp_buffer_sync_partial_end_f(buffer, 1, offset, length, __FUNCTION__)
+
 #define mpp_buffer_group_get_internal(group, type, ...) \
-    mpp_buffer_group_get(group, type, MPP_BUFFER_INTERNAL, MODULE_TAG, __FUNCTION__)
+    mpp_buffer_group_get(group, (MppBufferType)(type), MPP_BUFFER_INTERNAL, MODULE_TAG, __FUNCTION__)
 
 #define mpp_buffer_group_get_external(group, type, ...) \
-    mpp_buffer_group_get(group, type, MPP_BUFFER_EXTERNAL, MODULE_TAG, __FUNCTION__)
+    mpp_buffer_group_get(group, (MppBufferType)(type), MPP_BUFFER_EXTERNAL, MODULE_TAG, __FUNCTION__)
 
 #ifdef __cplusplus
 extern "C" {
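The ro variants above map to the same functions with ro = 1. A read-only sketch, assuming `buffer` holds finished hardware output and the CPU only reads it (per the commit note, the readonly option is the faster path for this case):

#include "mpp_buffer.h"

/* sketch only: checksum hw output without writing the buffer back */
static RK_U32 sum_buffer(MppBuffer buffer)
{
    RK_U8 *ptr  = (RK_U8 *)mpp_buffer_get_ptr(buffer);
    size_t size = mpp_buffer_get_size(buffer);
    RK_U32 sum  = 0;
    size_t i;

    mpp_buffer_sync_ro_begin(buffer);   /* invalidate before cpu read */

    for (i = 0; i < size; i++)
        sum += ptr[i];                  /* cpu access (read only) */

    mpp_buffer_sync_ro_end(buffer);     /* read-only end, nothing to flush back */

    return sum;
}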
@@ -302,6 +320,18 @@ MPP_RET mpp_buffer_set_index_with_caller(MppBuffer buffer, int index, const char
 size_t mpp_buffer_get_offset_with_caller(MppBuffer buffer, const char *caller);
 MPP_RET mpp_buffer_set_offset_with_caller(MppBuffer buffer, size_t offset, const char *caller);
 
+/**
+ * @brief MppBuffer cache operation function
+ * @param buffer The MppBuffer to run the cache operation
+ * @param ro for readonly option
+ * @param offset partial sync data start offset
+ * @param length partial sync data length
+ */
+MPP_RET mpp_buffer_sync_begin_f(MppBuffer buffer, RK_S32 ro, const char* caller);
+MPP_RET mpp_buffer_sync_end_f(MppBuffer buffer, RK_S32 ro, const char* caller);
+MPP_RET mpp_buffer_sync_partial_begin_f(MppBuffer buffer, RK_S32 ro, RK_U32 offset, RK_U32 length, const char* caller);
+MPP_RET mpp_buffer_sync_partial_end_f(MppBuffer buffer, RK_S32 ro, RK_U32 offset, RK_U32 length, const char* caller);
+
 MPP_RET mpp_buffer_group_get(MppBufferGroup *group, MppBufferType type, MppBufferMode mode,
                              const char *tag, const char *caller);
 MPP_RET mpp_buffer_group_put(MppBufferGroup group);
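The partial variants take an offset and length relative to the buffer, so only the touched region is synced. A sketch assuming the CPU patches just the start of a frame buffer; the 1280x720 region size is a made-up example value:

#include <string.h>
#include "mpp_buffer.h"

/* sketch only: sync just the sub-range the CPU actually modifies */
static void patch_region(MppBuffer buffer)
{
    RK_U32 offset = 0;                  /* start of the modified region */
    RK_U32 length = 1280 * 720;         /* assumed region size */
    RK_U8 *ptr    = (RK_U8 *)mpp_buffer_get_ptr(buffer);

    mpp_buffer_sync_partial_begin(buffer, offset, length);

    memset(ptr + offset, 0x80, length); /* cpu access (write) on the sub-range */

    mpp_buffer_sync_partial_end(buffer, offset, length);
}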
@@ -96,6 +96,9 @@ struct MppBufferImpl_t {
     size_t offset;
     size_t length;
 
+    /* cacheable flag */
+    RK_U32 uncached;
+
     /*
      * discard:
      * used for buf on group reset mode
@@ -121,6 +124,7 @@ struct MppBufferGroupImpl_t {
     RK_U32 group_id;
     MppBufferMode mode;
     MppBufferType type;
+    RK_U32 type_flags;
 
     /* group status flag */
     // buffer force clear mode flag
@@ -20,6 +20,7 @@
 
 #include "mpp_mem.h"
 #include "mpp_debug.h"
+#include "mpp_dmabuf.h"
 #include "mpp_buffer_impl.h"
 
 MPP_RET mpp_buffer_import_with_tag(MppBufferGroup group, MppBufferInfo *info, MppBuffer *buffer,
@@ -275,6 +276,77 @@ MPP_RET mpp_buffer_info_get_with_caller(MppBuffer buffer, MppBufferInfo *info, c
     return MPP_OK;
 }
 
+static MPP_RET check_buf_need_sync(MppBuffer buffer, MPP_RET *ret, const char *caller)
+{
+    if (NULL == buffer) {
+        mpp_err("check buffer found NULL pointer from %s\n", caller);
+        return MPP_NOK;
+    }
+
+    MppBufferImpl *impl = (MppBufferImpl *)buffer;
+
+    if (impl->info.fd <= 0) {
+        mpp_err("check fd found invalid fd %d from %s\n", impl->info.fd, caller);
+        return MPP_NOK;
+    }
+
+    /* uncached buffer do not need to sync */
+    if (impl->uncached) {
+        *ret = MPP_OK;
+        return MPP_NOK;
+    }
+
+    return MPP_OK;
+}
+
+MPP_RET mpp_buffer_sync_begin_f(MppBuffer buffer, RK_S32 ro, const char* caller)
+{
+    MPP_RET ret = MPP_NOK;
+
+    if (check_buf_need_sync(buffer, &ret, caller))
+        return ret;
+
+    MppBufferImpl *impl = (MppBufferImpl *)buffer;
+
+    return mpp_dmabuf_sync_begin(impl->info.fd, ro, caller);
+}
+
+MPP_RET mpp_buffer_sync_end_f(MppBuffer buffer, RK_S32 ro, const char* caller)
+{
+    MPP_RET ret = MPP_NOK;
+
+    if (check_buf_need_sync(buffer, &ret, caller))
+        return ret;
+
+    MppBufferImpl *impl = (MppBufferImpl *)buffer;
+
+    return mpp_dmabuf_sync_end(impl->info.fd, ro, caller);
+}
+
+MPP_RET mpp_buffer_sync_partial_begin_f(MppBuffer buffer, RK_S32 ro, RK_U32 offset, RK_U32 length, const char* caller)
+{
+    MPP_RET ret = MPP_NOK;
+
+    if (check_buf_need_sync(buffer, &ret, caller))
+        return ret;
+
+    MppBufferImpl *impl = (MppBufferImpl *)buffer;
+
+    return mpp_dmabuf_sync_partial_begin(impl->info.fd, ro, impl->offset + offset, length, caller);
+}
+
+MPP_RET mpp_buffer_sync_partial_end_f(MppBuffer buffer, RK_S32 ro, RK_U32 offset, RK_U32 length, const char* caller)
+{
+    MPP_RET ret = MPP_NOK;
+
+    if (check_buf_need_sync(buffer, &ret, caller))
+        return ret;
+
+    MppBufferImpl *impl = (MppBufferImpl *)buffer;
+
+    return mpp_dmabuf_sync_partial_end(impl->info.fd, ro, impl->offset + offset, length, caller);
+}
+
 MPP_RET mpp_buffer_group_get(MppBufferGroup *group, MppBufferType type, MppBufferMode mode,
                              const char *tag, const char *caller)
 {
@@ -376,4 +448,3 @@ MPP_RET mpp_buffer_group_limit_config(MppBufferGroup group, size_t size, RK_S32
     p->limit_count = count;
     return MPP_OK;
 }
-
@@ -402,6 +402,7 @@ MPP_RET mpp_buffer_create(const char *tag, const char *caller,
     p->group_id = group->group_id;
     p->mode = group->mode;
     p->type = group->type;
+    p->uncached = (group->type_flags & MPP_BUFFER_FLAGS_CACHABLE) ? 0 : 1;
     p->logs = group->logs;
     p->info = *info;
 
@@ -891,6 +892,7 @@ MppBufferGroupImpl *MppBufferService::get_group(const char *tag, const char *cal
     p->mode = mode;
     p->type = buffer_type;
     p->limit = BUFFER_GROUP_SIZE_DEFAULT;
+    p->type_flags = flags;
     p->clear_on_exit = (mpp_buffer_debug & MPP_BUF_DBG_CLR_ON_EXIT) ? (1) : (0);
     p->dump_on_exit = (mpp_buffer_debug & MPP_BUF_DBG_DUMP_ON_EXIT) ? (1) : (0);
 
@@ -41,6 +41,7 @@ add_library(osal STATIC
     mpp_mem_pool.cpp
     mpp_callback.cpp
     mpp_eventfd.cpp
+    mpp_dmabuf.cpp
     mpp_thread.cpp
     mpp_compat.cpp
     mpp_common.cpp
osal/inc/mpp_dmabuf.h (new file, 25 lines)
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: Apache-2.0 OR MIT */
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef __MPP_DMABUF_H__
+#define __MPP_DMABUF_H__
+
+#include "rk_type.h"
+#include "mpp_err.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MPP_RET mpp_dmabuf_sync_begin(RK_S32 fd, RK_S32 ro, const char *caller);
+MPP_RET mpp_dmabuf_sync_end(RK_S32 fd, RK_S32 ro, const char *caller);
+MPP_RET mpp_dmabuf_sync_partial_begin(RK_S32 fd, RK_S32 ro, RK_U32 offset, RK_U32 length, const char *caller);
+MPP_RET mpp_dmabuf_sync_partial_end(RK_S32 fd, RK_S32 ro, RK_U32 offset, RK_U32 length, const char *caller);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MPP_DMABUF_H__ */
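These osal helpers can also be used directly on a raw dma-buf fd, outside the MppBuffer wrappers. A sketch assuming `fd` was imported from some other allocator and `map` is its already-mapped pointer; both are hypothetical inputs:

#include "mpp_dmabuf.h"

/* sketch only: bracket a CPU read of an externally imported dma-buf */
static MPP_RET read_imported_dmabuf(RK_S32 fd, const void *map, size_t size)
{
    MPP_RET ret;

    ret = mpp_dmabuf_sync_begin(fd, 1, __FUNCTION__);   /* ro = 1, read only */
    if (ret)
        return ret;

    /* ... cpu reads `size` bytes from `map` here ... */
    (void)map;
    (void)size;

    return mpp_dmabuf_sync_end(fd, 1, __FUNCTION__);
}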
osal/mpp_dmabuf.cpp (new file, 127 lines)
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: Apache-2.0 OR MIT */
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co., Ltd.
+ */
+
+#include <string.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <linux/dma-buf.h>
+
+#include "mpp_log.h"
+#include "mpp_dmabuf.h"
+
+/* SET_NAME and SYNC_PARTIAL are supported after 4.4 kernel */
+
+/* Add dma buffer name uapi */
+#ifndef DMA_BUF_SET_NAME
+/* 32/64bitness of this uapi was botched in android, there's no difference
+ * between them in actual uapi, they're just different numbers.
+ */
+#define DMA_BUF_SET_NAME    _IOW(DMA_BUF_BASE, 1, const char *)
+#define DMA_BUF_SET_NAME_A  _IOW(DMA_BUF_BASE, 1, __u32)
+#define DMA_BUF_SET_NAME_B  _IOW(DMA_BUF_BASE, 1, __u64)
+#endif
+
+/* Add dma buffer sync partial uapi */
+#ifndef DMA_BUF_IOCTL_SYNC_PARTIAL
+struct dma_buf_sync_partial {
+    __u64 flags;
+    __u32 offset;
+    __u32 len;
+};
+
+#define DMA_BUF_IOCTL_SYNC_PARTIAL  _IOW(DMA_BUF_BASE, 2, struct dma_buf_sync_partial)
+#endif
+
+#define MPP_NO_PARTIAL_SUPPORT      25 /* ENOTTY */
+
+static RK_U32 has_partial_ops = 1;
+
+MPP_RET mpp_dmabuf_sync_begin(RK_S32 fd, RK_S32 ro, const char *caller)
+{
+    struct dma_buf_sync sync;
+    RK_S32 ret;
+
+    sync.flags = DMA_BUF_SYNC_START | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);
+
+    ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
+    if (ret) {
+        mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
+        return MPP_NOK;
+    }
+
+    return MPP_OK;
+}
+
+MPP_RET mpp_dmabuf_sync_end(RK_S32 fd, RK_S32 ro, const char *caller)
+{
+    struct dma_buf_sync sync;
+    RK_S32 ret;
+
+    sync.flags = DMA_BUF_SYNC_END | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);
+
+    ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
+    if (ret) {
+        mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
+        return MPP_NOK;
+    }
+
+    return MPP_OK;
+}
+
+MPP_RET mpp_dmabuf_sync_partial_begin(RK_S32 fd, RK_S32 ro, RK_U32 offset, RK_U32 length, const char *caller)
+{
+    if (has_partial_ops) {
+        struct dma_buf_sync_partial sync;
+        RK_S32 ret;
+
+        sync.flags = DMA_BUF_SYNC_START | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);
+        sync.offset = offset;
+        sync.len = length;
+
+        ret = ioctl(fd, DMA_BUF_IOCTL_SYNC_PARTIAL, &sync);
+        if (ret) {
+            if (errno == MPP_NO_PARTIAL_SUPPORT) {
+                has_partial_ops = 0;
+                goto NOT_SUPPORT;
+            }
+
+            mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
+            return MPP_NOK;
+        }
+
+        return MPP_OK;
+    }
+
+NOT_SUPPORT:
+    return mpp_dmabuf_sync_begin(fd, ro, caller);
+}
+
+MPP_RET mpp_dmabuf_sync_partial_end(RK_S32 fd, RK_S32 ro, RK_U32 offset, RK_U32 length, const char *caller)
+{
+    if (has_partial_ops) {
+        struct dma_buf_sync_partial sync;
+        RK_S32 ret;
+
+        sync.flags = DMA_BUF_SYNC_END | (ro ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW);
+        sync.offset = offset;
+        sync.len = length;
+
+        ret = ioctl(fd, DMA_BUF_IOCTL_SYNC_PARTIAL, &sync);
+        if (ret) {
+            if (errno == MPP_NO_PARTIAL_SUPPORT) {
+                has_partial_ops = 0;
+                goto NOT_SUPPORT;
+            }
+
+            mpp_err_f("ioctl failed for %s from %s\n", strerror(errno), caller);
+            return MPP_NOK;
+        }
+
+        return MPP_OK;
+    }
+
+NOT_SUPPORT:
+    return mpp_dmabuf_sync_end(fd, ro, caller);
+}