/*
 * Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at>
 *               2010      James Darnley <james.darnley@gmail.com>
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "yadif.h"

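/* Per-slice job parameters handed to filter_slice() through the
 * slice-threading execute() callback. */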
typedef struct ThreadData {
    AVFrame *frame;
    int plane;
    int w, h;
    int parity;
    int tff;
} ThreadData;

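/* Try the spatial prediction along diagonal offset j: if the summed absolute
 * differences between the line above and the line below are lower than the
 * current spatial_score, adopt this direction.  The braces opened here are
 * deliberately left unclosed; the FILTER macro closes them with "}} }}" after
 * the CHECK(-1)/CHECK(-2) and CHECK(1)/CHECK(2) chains. */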
#define CHECK(j)\
    {   int score = FFABS(cur[mrefs - 1 + (j)] - cur[prefs - 1 - (j)])\
                  + FFABS(cur[mrefs  +(j)] - cur[prefs  -(j)])\
                  + FFABS(cur[mrefs + 1 + (j)] - cur[prefs + 1 - (j)]);\
        if (score < spatial_score) {\
            spatial_score= score;\
            spatial_pred= (cur[mrefs  +(j)] + cur[prefs  -(j)])>>1;\

/* The is_not_edge argument here controls when the code will enter a branch
 * which reads up to and including x-3 and x+3. */

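/* Core per-pixel yadif kernel.  d is the temporal average of the co-located
 * pixel in the temporally previous and next fields of the same parity
 * (prev2/next2); temporal_diff0..2 measure how much the neighbourhood changes
 * over time; spatial_pred is an edge-directed interpolation refined by the
 * CHECK() chain.  Unless mode&2 (the *_nospatial modes) is set, the spatial
 * interlacing check on samples two lines away (b/f) may enlarge diff, and the
 * spatial prediction is finally clamped into [d - diff, d + diff].  mrefs and
 * prefs are the offsets to the line above and the line below the one being
 * interpolated. */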
#define FILTER(start, end, is_not_edge) \
    for (x = start;  x < end; x++) { \
        int c = cur[mrefs]; \
        int d = (prev2[0] + next2[0])>>1; \
        int e = cur[prefs]; \
        int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
        int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e) )>>1; \
        int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e) )>>1; \
        int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
        int spatial_pred = (c+e) >> 1; \
 \
        if (is_not_edge) {\
            int spatial_score = FFABS(cur[mrefs - 1] - cur[prefs - 1]) + FFABS(c-e) \
                              + FFABS(cur[mrefs + 1] - cur[prefs + 1]) - 1; \
            CHECK(-1) CHECK(-2) }} }} \
            CHECK( 1) CHECK( 2) }} }} \
        }\
 \
        if (!(mode&2)) { \
            int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
            int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
            int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
            int min = FFMIN3(d - e, d - c, FFMAX(b - c, f - e)); \
 \
            diff = FFMAX3(diff, min, -max); \
        } \
 \
        if (spatial_pred > d + diff) \
           spatial_pred = d + diff; \
        else if (spatial_pred < d - diff) \
           spatial_pred = d - diff; \
 \
        dst[0] = spatial_pred; \
 \
        dst++; \
        cur++; \
        prev++; \
        next++; \
        prev2++; \
        next2++; \
    }

static void filter_line_c(void *dst1,
                          void *prev1, void *cur1, void *next1,
                          int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    /* The function is called with the pointers already pointing to data[3] and
     * with 6 subtracted from the width.  This allows the FILTER macro to be
     * called so that it processes all the pixels normally.  A constant value of
     * true for is_not_edge lets the compiler ignore the if statement. */
    FILTER(0, w, 1)
}

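/* Handle the pixels near the left and right borders that filter_line_c()
 * skips: the first three columns, and the last MAX_ALIGN-1 columns that a
 * SIMD filter_line implementation may not cover.  The outermost three columns
 * on each side are filtered with is_not_edge == 0 so the spatial check never
 * reads outside the line. */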
#define MAX_ALIGN 8
static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
                         int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    /* Only edge pixels need to be processed here.  A constant value of false
     * for is_not_edge should let the compiler ignore the whole branch. */
    FILTER(0, 3, 0)

    dst  = (uint8_t*)dst1  + w - (MAX_ALIGN-1);
    prev = (uint8_t*)prev1 + w - (MAX_ALIGN-1);
    cur  = (uint8_t*)cur1  + w - (MAX_ALIGN-1);
    next = (uint8_t*)next1 + w - (MAX_ALIGN-1);
    prev2 = (uint8_t*)(parity ? prev : cur);
    next2 = (uint8_t*)(parity ? cur  : next);

    FILTER(w - (MAX_ALIGN-1), w - 3, 1)
    FILTER(w - 3, w, 0)
}


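/* 16-bit variants of the two functions above.  They operate on uint16_t
 * samples, so prefs/mrefs (given in bytes) are converted to element counts,
 * and the right-edge rebase uses MAX_ALIGN/2 elements instead of MAX_ALIGN
 * bytes. */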
static void filter_line_c_16bit(void *dst1,
                                void *prev1, void *cur1, void *next1,
                                int w, int prefs, int mrefs, int parity,
                                int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
    mrefs /= 2;
    prefs /= 2;

    FILTER(0, w, 1)
}

static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
                               int w, int prefs, int mrefs, int parity, int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
    mrefs /= 2;
    prefs /= 2;

    FILTER(0, 3, 0)

    dst   = (uint16_t*)dst1  + w - (MAX_ALIGN/2-1);
    prev  = (uint16_t*)prev1 + w - (MAX_ALIGN/2-1);
    cur   = (uint16_t*)cur1  + w - (MAX_ALIGN/2-1);
    next  = (uint16_t*)next1 + w - (MAX_ALIGN/2-1);
    prev2 = (uint16_t*)(parity ? prev : cur);
    next2 = (uint16_t*)(parity ? cur  : next);

    FILTER(w - (MAX_ALIGN/2-1), w - 3, 1)
    FILTER(w - 3, w, 0)
}

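/* Slice-threading worker: job jobnr of nb_jobs processes rows
 * [h*jobnr/nb_jobs, h*(jobnr+1)/nb_jobs) of one plane.  Rows of the field
 * being rebuilt are interpolated with filter_line/filter_edges; rows of the
 * kept field are copied verbatim from the current frame.  The second and
 * second-to-last lines are forced to mode 2 so the check that reads two
 * lines away stays inside the plane. */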
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    YADIFContext *s = ctx->priv;
    ThreadData *td  = arg;
    int refs = s->cur->linesize[td->plane];
    int df = (s->csp->comp[td->plane].depth_minus1 + 8) / 8;
    int pix_3 = 3 * df;
    int slice_start = (td->h *  jobnr   ) / nb_jobs;
    int slice_end   = (td->h * (jobnr+1)) / nb_jobs;
    int y;

    /* filtering reads 3 pixels to the left/right; to avoid invalid reads,
     * we need to call the c variant which avoids this for border pixels
     */
    for (y = slice_start; y < slice_end; y++) {
        if ((y ^ td->parity) & 1) {
            uint8_t *prev = &s->prev->data[td->plane][y * refs];
            uint8_t *cur  = &s->cur ->data[td->plane][y * refs];
            uint8_t *next = &s->next->data[td->plane][y * refs];
            uint8_t *dst  = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]];
            int     mode  = y == 1 || y + 2 == td->h ? 2 : s->mode;
            s->filter_line(dst + pix_3, prev + pix_3, cur + pix_3,
                           next + pix_3, td->w - (3 + MAX_ALIGN/df-1),
                           y + 1 < td->h ? refs : -refs,
                           y ? -refs : refs,
                           td->parity ^ td->tff, mode);
            s->filter_edges(dst, prev, cur, next, td->w,
                            y + 1 < td->h ? refs : -refs,
                            y ? -refs : refs,
                            td->parity ^ td->tff, mode);
        } else {
            memcpy(&td->frame->data[td->plane][y * td->frame->linesize[td->plane]],
                   &s->cur->data[td->plane][y * refs], td->w * df);
        }
    }
    return 0;
}

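/* Deinterlace one output picture: run filter_slice() over every plane, using
 * the chroma-subsampled width/height for the chroma planes and capping the
 * number of jobs at the plane height. */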
static void filter(AVFilterContext *ctx, AVFrame *dstpic,
                   int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    ThreadData td = { .frame = dstpic, .parity = parity, .tff = tff };
    int i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->width;
        int h = dstpic->height;

        if (i == 1 || i == 2) {
            w = FF_CEIL_RSHIFT(w, yadif->csp->log2_chroma_w);
            h = FF_CEIL_RSHIFT(h, yadif->csp->log2_chroma_h);
        }


        td.w       = w;
        td.h       = h;
        td.plane   = i;

        ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ctx->graph->nb_threads));
    }

    emms_c();
}

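/* Emit one deinterlaced frame.  The field order is either forced by the
 * "parity" option or taken from the frame's top_field_first flag.  In the
 * field-rate modes (mode&1) this is called a second time with is_second=1 to
 * output the other field; that call allocates its own output buffer and sets
 * its pts to cur_pts + next_pts, the midpoint between the two input frames in
 * the doubled output time base set up in config_props(). */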
static int return_frame(AVFilterContext *ctx, int is_second)
{
    YADIFContext *yadif = ctx->priv;
    AVFilterLink *link  = ctx->outputs[0];
    int tff, ret;

    if (yadif->parity == -1) {
        tff = yadif->cur->interlaced_frame ?
              yadif->cur->top_field_first : 1;
    } else {
        tff = yadif->parity ^ 1;
    }

    if (is_second) {
        yadif->out = ff_get_video_buffer(link, link->w, link->h);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        av_frame_copy_props(yadif->out, yadif->cur);
        yadif->out->interlaced_frame = 0;
    }

    filter(ctx, yadif->out, tff ^ !is_second, tff);

    if (is_second) {
        int64_t cur_pts  = yadif->cur->pts;
        int64_t next_pts = yadif->next->pts;

        if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
            yadif->out->pts = cur_pts + next_pts;
        } else {
            yadif->out->pts = AV_NOPTS_VALUE;
        }
    }
    ret = ff_filter_frame(ctx->outputs[0], yadif->out);

    yadif->frame_pending = (yadif->mode&1) && !is_second;
    return ret;
}

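/* Return 1 if any plane of the two frames has a different stride. */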
static int checkstride(YADIFContext *yadif, const AVFrame *a, const AVFrame *b)
{
    int i;
    for (i = 0; i < yadif->csp->nb_components; i++)
        if (a->linesize[i] != b->linesize[i])
            return 1;
    return 0;
}

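/* Copy a frame into a freshly allocated default buffer so that prev, cur and
 * next end up with identical strides (filter_slice() indexes all three with
 * the stride of cur). */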
static void fixstride(AVFilterLink *link, AVFrame *f)
{
    AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height);
    if(!dst)
        return;
    av_frame_copy_props(dst, f);
    av_image_copy(dst->data, dst->linesize,
                  (const uint8_t **)f->data, f->linesize,
                  dst->format, dst->width, dst->height);
    av_frame_unref(f);
    av_frame_move_ref(f, dst);
    av_frame_free(&dst);
}

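/* Input callback: shift the prev/cur/next frame window, normalize strides,
 * and either pass the frame through untouched (deint=interlaced on a
 * progressive frame, or the filter being disabled) or start deinterlacing the
 * current frame.  Output pts values are rescaled to the doubled output time
 * base. */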
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    YADIFContext *yadif = ctx->priv;

    av_assert0(frame);

    if (yadif->frame_pending)
        return_frame(ctx, 1);

    if (yadif->prev)
        av_frame_free(&yadif->prev);
    yadif->prev = yadif->cur;
    yadif->cur  = yadif->next;
    yadif->next = frame;

    if (!yadif->cur &&
        !(yadif->cur = av_frame_clone(yadif->next)))
        return AVERROR(ENOMEM);

    if (checkstride(yadif, yadif->next, yadif->cur)) {
        av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n");
        fixstride(link, yadif->next);
    }
    if (checkstride(yadif, yadif->next, yadif->cur))
        fixstride(link, yadif->cur);
    if (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))
        fixstride(link, yadif->prev);
    if (checkstride(yadif, yadif->next, yadif->cur) || (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))) {
        av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n");
        return -1;
    }

    if ((yadif->deint && !yadif->cur->interlaced_frame) || ctx->is_disabled) {
        yadif->out  = av_frame_clone(yadif->cur);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        av_frame_free(&yadif->prev);
        if (yadif->out->pts != AV_NOPTS_VALUE)
            yadif->out->pts *= 2;
        return ff_filter_frame(ctx->outputs[0], yadif->out);
    }

    if (!yadif->prev)
        return 0;

    yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
    if (!yadif->out)
        return AVERROR(ENOMEM);

    av_frame_copy_props(yadif->out, yadif->cur);
    yadif->out->interlaced_frame = 0;

    if (yadif->out->pts != AV_NOPTS_VALUE)
        yadif->out->pts *= 2;

    return return_frame(ctx, 0);
}

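/* Output request callback: flush a pending second field if there is one,
 * otherwise pull frames from upstream until a full prev/cur/next window
 * exists.  On EOF the last frame is fed in again with an extrapolated pts so
 * the trailing field(s) are still emitted. */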
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    YADIFContext *yadif = ctx->priv;

    if (yadif->frame_pending) {
        return_frame(ctx, 1);
        return 0;
    }

    do {
        int ret;

        if (yadif->eof)
            return AVERROR_EOF;

        ret  = ff_request_frame(link->src->inputs[0]);

        if (ret == AVERROR_EOF && yadif->cur) {
            AVFrame *next = av_frame_clone(yadif->next);

            if (!next)
                return AVERROR(ENOMEM);

            next->pts = yadif->next->pts * 2 - yadif->cur->pts;

            filter_frame(link->src->inputs[0], next);
            yadif->eof = 1;
        } else if (ret < 0) {
            return ret;
        }
    } while (!yadif->prev);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    YADIFContext *yadif = ctx->priv;

    av_frame_free(&yadif->prev);
    av_frame_free(&yadif->cur );
    av_frame_free(&yadif->next);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUV420P9,
        AV_PIX_FMT_YUV422P9,
        AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10,
        AV_PIX_FMT_YUV422P10,
        AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12,
        AV_PIX_FMT_YUV422P12,
        AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUV420P14,
        AV_PIX_FMT_YUV422P14,
        AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16,
        AV_PIX_FMT_YUV422P16,
        AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUVA422P,
        AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_GBRP,
        AV_PIX_FMT_GBRAP,
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

    return 0;
}

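/* Output link configuration: the output time base is made twice as fine
 * (den * 2) so field-rate output can be timestamped exactly, and in the
 * field-rate modes (mode&1) the advertised frame rate is doubled as well.
 * Inputs narrower or shorter than 3 pixels are rejected, and the 8- or 16-bit
 * line filters are selected from the pixel format's component depth before
 * x86 SIMD versions may replace them. */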
static int config_props(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    YADIFContext *s = link->src->priv;

    link->time_base.num = link->src->inputs[0]->time_base.num;
    link->time_base.den = link->src->inputs[0]->time_base.den * 2;
    link->w             = link->src->inputs[0]->w;
    link->h             = link->src->inputs[0]->h;

    if(s->mode&1)
        link->frame_rate = av_mul_q(link->src->inputs[0]->frame_rate, (AVRational){2,1});

    if (link->w < 3 || link->h < 3) {
        av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
        return AVERROR(EINVAL);
    }

    s->csp = av_pix_fmt_desc_get(link->format);
    if (s->csp->comp[0].depth_minus1 / 8 == 1) {
        s->filter_line  = filter_line_c_16bit;
        s->filter_edges = filter_edges_16bit;
    } else {
        s->filter_line  = filter_line_c;
        s->filter_edges = filter_edges;
    }

    if (ARCH_X86)
        ff_yadif_init_x86(s);

    return 0;
}


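/* AVOption table: "mode" selects frame- or field-rate output and whether the
 * spatial interlacing check is applied, "parity" overrides or auto-detects
 * the field order, and "deint" limits filtering to frames flagged as
 * interlaced. */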
#define OFFSET(x) offsetof(YADIFContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }

static const AVOption yadif_options[] = {
    { "mode",   "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FRAME}, 0, 3, FLAGS, "mode"},
    CONST("send_frame",           "send one frame for each frame",                                     YADIF_MODE_SEND_FRAME,           "mode"),
    CONST("send_field",           "send one frame for each field",                                     YADIF_MODE_SEND_FIELD,           "mode"),
    CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL, "mode"),
    CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL, "mode"),

    { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" },
    CONST("tff",  "assume top field first",    YADIF_PARITY_TFF,  "parity"),
    CONST("bff",  "assume bottom field first", YADIF_PARITY_BFF,  "parity"),
    CONST("auto", "auto detect parity",        YADIF_PARITY_AUTO, "parity"),

    { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" },
    CONST("all",        "deinterlace all frames",                       YADIF_DEINT_ALL,         "deint"),
    CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED,  "deint"),

    { NULL }
};

AVFILTER_DEFINE_CLASS(yadif);

static const AVFilterPad avfilter_vf_yadif_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_yadif_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter ff_vf_yadif = {
    .name          = "yadif",
    .description   = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
    .priv_size     = sizeof(YADIFContext),
    .priv_class    = &yadif_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_yadif_inputs,
    .outputs       = avfilter_vf_yadif_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};