fix[utils]: fix hor_stride 24 alignment error

Change-Id: Ife950bd3ae5fac5faffffa4275222fcc7fb9bbe0
Signed-off-by: xueman.ruan <xueman.ruan@rock-chips.com>
This commit is contained in:
xueman.ruan
2023-09-15 16:09:18 +08:00
committed by Herman Chen
parent b81d8a9c4e
commit 7017dbd1b6
3 changed files with 6 additions and 5 deletions

View File

@@ -173,11 +173,11 @@ static RK_S32 check_8_pixel_aligned(RK_S32 workaround, RK_S32 hor_stride,
RK_S32 pixel_aign, RK_S32 pixel_size, RK_S32 pixel_aign, RK_S32 pixel_size,
const char *fmt_name) const char *fmt_name)
{ {
if (!workaround && hor_stride != MPP_ALIGN(hor_stride, pixel_aign * pixel_size)) { if (!workaround && hor_stride != MPP_ALIGN_GEN(hor_stride, pixel_aign * pixel_size)) {
mpp_log("warning: vepu only support 8 aligned horizontal stride in pixel for %s with pixel size %d\n", mpp_log("warning: vepu only support 8 aligned horizontal stride in pixel for %s with pixel size %d\n",
fmt_name, pixel_size); fmt_name, pixel_size);
mpp_log("set byte stride to %d to match the requirement\n", mpp_log("set byte stride to %d to match the requirement\n",
MPP_ALIGN(hor_stride, pixel_aign * pixel_size)); MPP_ALIGN_GEN(hor_stride, pixel_aign * pixel_size));
workaround = 1; workaround = 1;
} }

View File

@@ -40,6 +40,7 @@
#define MPP_SWAP(type, a, b) do {type SWAP_tmp = b; b = a; a = SWAP_tmp;} while(0) #define MPP_SWAP(type, a, b) do {type SWAP_tmp = b; b = a; a = SWAP_tmp;} while(0)
#define MPP_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0])) #define MPP_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
#define MPP_ALIGN(x, a) (((x)+(a)-1)&~((a)-1)) #define MPP_ALIGN(x, a) (((x)+(a)-1)&~((a)-1))
#define MPP_ALIGN_GEN(x, a) ((((x) + (a) - 1) / (a)) * (a)) /* round x up to a multiple of a; unlike MPP_ALIGN's bitmask, works for non-power-of-2 a (e.g. 24) */
#define MPP_VSWAP(a, b) { a ^= b; b ^= a; a ^= b; } #define MPP_VSWAP(a, b) { a ^= b; b ^= a; a ^= b; }
#define MPP_RB16(x) ((((const RK_U8*)(x))[0] << 8) | ((const RK_U8*)(x))[1]) #define MPP_RB16(x) ((((const RK_U8*)(x))[0] << 8) | ((const RK_U8*)(x))[1])

View File

@@ -1024,11 +1024,11 @@ static RK_S32 util_check_8_pixel_aligned(RK_S32 workaround, RK_S32 hor_stride,
RK_S32 pixel_aign, RK_S32 pixel_size, RK_S32 pixel_aign, RK_S32 pixel_size,
const char *fmt_name) const char *fmt_name)
{ {
if (!workaround && hor_stride != MPP_ALIGN(hor_stride, pixel_aign * pixel_size)) { if (!workaround && hor_stride != MPP_ALIGN_GEN(hor_stride, pixel_aign * pixel_size)) {
mpp_log("warning: vepu only support 8 aligned horizontal stride in pixel for %s with pixel size %d\n", mpp_log("warning: vepu only support 8 aligned horizontal stride in pixel for %s with pixel size %d\n",
fmt_name, pixel_size); fmt_name, pixel_size);
mpp_log("set byte stride to %d to match the requirement\n", mpp_log("set byte stride to %d to match the requirement\n",
MPP_ALIGN(hor_stride, pixel_aign * pixel_size)); MPP_ALIGN_GEN(hor_stride, pixel_aign * pixel_size));
workaround = 1; workaround = 1;
} }
@@ -1301,7 +1301,7 @@ MPP_RET fill_image(RK_U8 *buf, RK_U32 width, RK_U32 height,
if (util_check_8_pixel_aligned(not_8_pixel, hor_stride, if (util_check_8_pixel_aligned(not_8_pixel, hor_stride,
8, pix_w, "24bit RGB")) { 8, pix_w, "24bit RGB")) {
hor_stride = MPP_ALIGN(hor_stride, 24); hor_stride = MPP_ALIGN_GEN(hor_stride, 24);
not_8_pixel = 1; not_8_pixel = 1;
} }