From 4ca6be39eba08bf593e69f72f4a678f17a9a568c Mon Sep 17 00:00:00 2001
From: Herman Chen
Date: Fri, 24 Jul 2020 11:06:51 +0800
Subject: [PATCH] [hal_h264e_vepu]: Fix width / stride relationship

The vepu only supports a matched width / stride configuration. The stride
should be set up according to the input format.

Change-Id: I2cd4e6bbf4c3888db0402096f80a6896c9a4bf53
Signed-off-by: Herman Chen
---
 mpp/hal/vpu/h264e/hal_h264e_vepu_v2.c | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/mpp/hal/vpu/h264e/hal_h264e_vepu_v2.c b/mpp/hal/vpu/h264e/hal_h264e_vepu_v2.c
index 155290f1..c0dea2f3 100644
--- a/mpp/hal/vpu/h264e/hal_h264e_vepu_v2.c
+++ b/mpp/hal/vpu/h264e/hal_h264e_vepu_v2.c
@@ -409,7 +409,8 @@ MPP_RET h264e_vepu_prep_setup(HalH264eVepuPrep *prep, MppEncPrepCfg *cfg)
         }
     }
 
-    RK_S32 hor_stride = cfg->hor_stride;
+    /* NOTE: vepu only support 8bit encoding and stride must match with width align to 16 */
+    RK_S32 hor_stride = MPP_ALIGN(cfg->width, 16);
     RK_S32 ver_stride = cfg->ver_stride;
     prep->offset_cb = 0;
     prep->offset_cr = 0;
@@ -419,22 +420,38 @@
         prep->offset_cb = hor_stride * ver_stride;
         prep->size_y = hor_stride * MPP_ALIGN(prep->src_h, 16);
         prep->size_c = hor_stride / 2 * MPP_ALIGN(prep->src_h / 2, 8);
+
+        if (cfg->hor_stride != MPP_ALIGN(cfg->width, 16))
+            mpp_log_f("vepu only support 16byte aligned YUV420SP horizontal stride %d vs width %d\n",
+                      cfg->hor_stride, cfg->width);
     } break;
     case MPP_FMT_YUV420P : {
         prep->offset_cb = hor_stride * ver_stride;
         prep->offset_cr = prep->offset_cb + ((hor_stride * ver_stride) / 4);
         prep->size_y = hor_stride * MPP_ALIGN(prep->src_h, 16);
         prep->size_c = hor_stride / 2 * MPP_ALIGN(prep->src_h / 2, 8);
+
+        if (cfg->hor_stride != MPP_ALIGN(cfg->width, 16))
+            mpp_log_f("vepu only support 16byte aligned YUV420P horizontal stride %d vs width %d\n",
+                      cfg->hor_stride, cfg->width);
     } break;
     case MPP_FMT_YUV422_YUYV :
     case MPP_FMT_YUV422_UYVY : {
         prep->size_y = hor_stride * 2 * MPP_ALIGN(prep->src_h, 16);
         prep->size_c = 0;
+
+        if (cfg->hor_stride != (MPP_ALIGN(cfg->width, 16) * 2))
+            mpp_log_f("vepu only support 16 pixel aligned YUV422 horizontal stride %d vs width %d\n",
+                      cfg->hor_stride, cfg->width);
     } break;
     case MPP_FMT_RGB565 :
     case MPP_FMT_BGR444 : {
         prep->size_y = hor_stride * 2 * MPP_ALIGN(prep->src_h, 16);
         prep->size_c = 0;
+
+        if (cfg->hor_stride != cfg->width * 2)
+            mpp_log_f("vepu only support matched 16bit pixel horizontal stride %d vs width %d\n",
+                      cfg->hor_stride, cfg->width);
     } break;
     case MPP_FMT_BGR888 :
     case MPP_FMT_RGB888 :
@@ -443,6 +460,10 @@
     case MPP_FMT_BGR101010 : {
         prep->size_y = hor_stride * 4 * MPP_ALIGN(prep->src_h, 16);
         prep->size_c = 0;
+
+        if (cfg->hor_stride != cfg->width * 4)
+            mpp_log_f("vepu only support matched 32bit pixel horizontal stride %d vs width %d\n",
+                      cfg->hor_stride, cfg->width);
     } break;
     default: {
         mpp_err_f("invalid format %d", format);
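
Editorial note: the standalone C sketch below restates the width / stride rule that the
new checks log, so it can be tried outside of MPP. It is only a sketch of the rule as
expressed by the patch; MPP_ALIGN is redefined locally so the example compiles on its
own (it mirrors the macro from mpp_common.h), and the expected_stride_* helper names
are hypothetical, not part of the MPP API.

#include <stdio.h>

/* Local copy of the alignment macro (same expansion as MPP_ALIGN in mpp_common.h) */
#define MPP_ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

/*
 * Expected horizontal stride in bytes for each input format class,
 * matching the checks added by this patch. Helper names are hypothetical.
 */
static int expected_stride_yuv420(int width)  { return MPP_ALIGN(width, 16); }      /* YUV420SP / YUV420P */
static int expected_stride_yuv422i(int width) { return MPP_ALIGN(width, 16) * 2; }  /* YUYV / UYVY, 2 bytes per pixel */
static int expected_stride_rgb16(int width)   { return width * 2; }                 /* RGB565 / BGR444 */
static int expected_stride_rgb32(int width)   { return width * 4; }                 /* 888 / 101010 packed in 32 bits */

int main(void)
{
    int width = 1080;   /* deliberately not a multiple of 16 */

    printf("width %d:\n", width);
    printf("  YUV420SP/P stride : %d\n", expected_stride_yuv420(width));   /* 1088 */
    printf("  YUV422 interleaved: %d\n", expected_stride_yuv422i(width));  /* 2176 */
    printf("  16bit RGB stride  : %d\n", expected_stride_rgb16(width));    /* 2160 */
    printf("  32bit RGB stride  : %d\n", expected_stride_rgb32(width));    /* 4320 */
    return 0;
}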