diff --git a/mpp/hal/vpu/common/vepu_common.c b/mpp/hal/vpu/common/vepu_common.c
index e49a5973..491c82eb 100644
--- a/mpp/hal/vpu/common/vepu_common.c
+++ b/mpp/hal/vpu/common/vepu_common.c
@@ -173,11 +173,11 @@ static RK_S32 check_8_pixel_aligned(RK_S32 workaround, RK_S32 hor_stride,
                                     RK_S32 pixel_aign, RK_S32 pixel_size,
                                     const char *fmt_name)
 {
-    if (!workaround && hor_stride != MPP_ALIGN(hor_stride, pixel_aign * pixel_size)) {
+    if (!workaround && hor_stride != MPP_ALIGN_GEN(hor_stride, pixel_aign * pixel_size)) {
         mpp_log("warning: vepu only support 8 aligned horizontal stride in pixel for %s with pixel size %d\n",
                 fmt_name, pixel_size);
         mpp_log("set byte stride to %d to match the requirement\n",
-                MPP_ALIGN(hor_stride, pixel_aign * pixel_size));
+                MPP_ALIGN_GEN(hor_stride, pixel_aign * pixel_size));
         workaround = 1;
     }
 
diff --git a/osal/inc/mpp_common.h b/osal/inc/mpp_common.h
index faa04929..b80c0c4f 100644
--- a/osal/inc/mpp_common.h
+++ b/osal/inc/mpp_common.h
@@ -40,6 +40,7 @@
 #define MPP_SWAP(type, a, b)    do {type SWAP_tmp = b; b = a; a = SWAP_tmp;} while(0)
 #define MPP_ARRAY_ELEMS(a)      (sizeof(a) / sizeof((a)[0]))
 #define MPP_ALIGN(x, a)         (((x)+(a)-1)&~((a)-1))
+#define MPP_ALIGN_GEN(x, a)     (((x)+(a)-1)/(a)*(a))
 #define MPP_VSWAP(a, b)         { a ^= b; b ^= a; a ^= b; }
 
 #define MPP_RB16(x)             ((((const RK_U8*)(x))[0] << 8) | ((const RK_U8*)(x))[1])
diff --git a/utils/utils.c b/utils/utils.c
index e00cc4fd..91399e2d 100644
--- a/utils/utils.c
+++ b/utils/utils.c
@@ -1024,11 +1024,11 @@ static RK_S32 util_check_8_pixel_aligned(RK_S32 workaround, RK_S32 hor_stride,
                                          RK_S32 pixel_aign, RK_S32 pixel_size,
                                          const char *fmt_name)
 {
-    if (!workaround && hor_stride != MPP_ALIGN(hor_stride, pixel_aign * pixel_size)) {
+    if (!workaround && hor_stride != MPP_ALIGN_GEN(hor_stride, pixel_aign * pixel_size)) {
         mpp_log("warning: vepu only support 8 aligned horizontal stride in pixel for %s with pixel size %d\n",
                 fmt_name, pixel_size);
         mpp_log("set byte stride to %d to match the requirement\n",
-                MPP_ALIGN(hor_stride, pixel_aign * pixel_size));
+                MPP_ALIGN_GEN(hor_stride, pixel_aign * pixel_size));
         workaround = 1;
     }
 
@@ -1301,7 +1301,7 @@ MPP_RET fill_image(RK_U8 *buf, RK_U32 width, RK_U32 height,
 
     if (util_check_8_pixel_aligned(not_8_pixel, hor_stride,
                                    8, pix_w, "24bit RGB")) {
-        hor_stride = MPP_ALIGN(hor_stride, 24);
+        hor_stride = MPP_ALIGN_GEN(hor_stride, 24);
         not_8_pixel = 1;
     }
 
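
Note (reviewer sketch, not part of the patch): the bitmask form of
MPP_ALIGN only rounds up correctly when the alignment is a power of
two, because ~((a)-1) is a valid low-bit mask only in that case. The
24-byte stride required for 24-bit RGB is not a power of two, which is
what the new divide-multiply MPP_ALIGN_GEN handles. A minimal
standalone check, with both macros copied from the patch:

    /* Compares the two alignment macros from this patch. MPP_ALIGN
     * masks off the low bits, so it is only correct for power-of-two
     * alignments; MPP_ALIGN_GEN rounds up via integer division and
     * works for any positive alignment. */
    #include <stdio.h>

    #define MPP_ALIGN(x, a)     (((x)+(a)-1)&~((a)-1))
    #define MPP_ALIGN_GEN(x, a) (((x)+(a)-1)/(a)*(a))

    int main(void)
    {
        /* Power-of-two alignment: both macros agree. */
        printf("align 10 to 16: MPP_ALIGN=%d MPP_ALIGN_GEN=%d\n",
               MPP_ALIGN(10, 16), MPP_ALIGN_GEN(10, 16));   /* 16 16 */

        /* Non-power-of-two alignment (24-bit RGB stride): the mask
         * form overshoots to 32, the general form gives 24. */
        printf("align 10 to 24: MPP_ALIGN=%d MPP_ALIGN_GEN=%d\n",
               MPP_ALIGN(10, 24), MPP_ALIGN_GEN(10, 24));   /* 32 24 */
        return 0;
    }

Both call sites changed here pass pixel_aign * pixel_size (8 * 3 = 24
for 24-bit RGB), so switching to MPP_ALIGN_GEN fixes the stride check
for exactly this non-power-of-two case.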