libavcodec/mpegvideo.c

00001 /*
00002  * The simplest mpeg encoder (well, it was the simplest!)
00003  * Copyright (c) 2000,2001 Fabrice Bellard
00004  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
00005  *
00006  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
00007  *
00008  * This file is part of Libav.
00009  *
00010  * Libav is free software; you can redistribute it and/or
00011  * modify it under the terms of the GNU Lesser General Public
00012  * License as published by the Free Software Foundation; either
00013  * version 2.1 of the License, or (at your option) any later version.
00014  *
00015  * Libav is distributed in the hope that it will be useful,
00016  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00017  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018  * Lesser General Public License for more details.
00019  *
00020  * You should have received a copy of the GNU Lesser General Public
00021  * License along with Libav; if not, write to the Free Software
00022  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
00023  */
00024 
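/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */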
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/imgutils.h"
00032 #include "avcodec.h"
00033 #include "dsputil.h"
00034 #include "internal.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "mjpegenc.h"
00038 #include "msmpeg4.h"
00039 #include "faandct.h"
00040 #include "xvmc_internal.h"
00041 #include "thread.h"
00042 #include <limits.h>
00043 
00044 //#undef NDEBUG
00045 //#include <assert.h>
00046 
00047 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
00048                                    DCTELEM *block, int n, int qscale);
00049 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
00050                                    DCTELEM *block, int n, int qscale);
00051 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
00052                                    DCTELEM *block, int n, int qscale);
00053 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
00054                                    DCTELEM *block, int n, int qscale);
00055 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
00056                                    DCTELEM *block, int n, int qscale);
00057 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
00058                                   DCTELEM *block, int n, int qscale);
00059 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
00060                                   DCTELEM *block, int n, int qscale);
00061 
00062 
00063 /* enable all paranoid tests for rounding, overflows, etc... */
00064 //#define PARANOID
00065 
00066 //#define DEBUG
00067 
00068 
00069 static const uint8_t ff_default_chroma_qscale_table[32] = {
00070 //   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
00071      0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
00072     16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
00073 };
00074 
00075 const uint8_t ff_mpeg1_dc_scale_table[128] = {
00076 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
00077     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00078     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00079     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00080     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00081     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00082     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00083     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00084     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00085 };
00086 
00087 static const uint8_t mpeg2_dc_scale_table1[128] = {
00088 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
00089     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00090     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00091     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00092     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00093     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00094     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00095     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00096     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00097 };
00098 
00099 static const uint8_t mpeg2_dc_scale_table2[128] = {
00100 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
00101     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00102     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00103     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00104     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00105     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00106     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00107     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00108     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00109 };
00110 
00111 static const uint8_t mpeg2_dc_scale_table3[128] = {
00112 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
00113     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00114     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00115     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00116     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00117     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00118     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00119     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00120     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00121 };
00122 
00123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
00124     ff_mpeg1_dc_scale_table,
00125     mpeg2_dc_scale_table1,
00126     mpeg2_dc_scale_table2,
00127     mpeg2_dc_scale_table3,
00128 };
00129 
00130 const enum PixelFormat ff_pixfmt_list_420[] = {
00131     PIX_FMT_YUV420P,
00132     PIX_FMT_NONE
00133 };
00134 
00135 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
00136     PIX_FMT_DXVA2_VLD,
00137     PIX_FMT_VAAPI_VLD,
00138     PIX_FMT_VDA_VLD,
00139     PIX_FMT_YUV420P,
00140     PIX_FMT_NONE
00141 };
00142 
00143 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
00144                                           const uint8_t *end,
00145                                           uint32_t * restrict state)
00146 {
00147     int i;
00148 
00149     assert(p <= end);
00150     if (p >= end)
00151         return end;
00152 
00153     for (i = 0; i < 3; i++) {
00154         uint32_t tmp = *state << 8;
00155         *state = tmp + *(p++);
00156         if (tmp == 0x100 || p == end)
00157             return p;
00158     }
00159 
00160     while (p < end) {
00161         if      (p[-1] > 1      ) p += 3;
00162         else if (p[-2]          ) p += 2;
00163         else if (p[-3]|(p[-1]-1)) p++;
00164         else {
00165             p++;
00166             break;
00167         }
00168     }
00169 
00170     p = FFMIN(p, end) - 4;
00171     *state = AV_RB32(p);
00172 
00173     return p + 4;
00174 }
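/* A minimal usage sketch (not part of the original file), assuming a caller that
 * just wants to enumerate MPEG start codes in one buffer. On return, a *state of
 * the form 0x000001XX means start code XX was completed just before the returned
 * pointer; otherwise the rolling state simply carries the last bytes seen. */
static void scan_start_codes(const uint8_t *buf, int buf_size)
{
    const uint8_t *p   = buf;
    const uint8_t *end = buf + buf_size;
    uint32_t state     = -1;   /* no previous bytes */

    while (p < end) {
        p = avpriv_mpv_find_start_code(p, end, &state);
        if ((state & 0xFFFFFF00) == 0x100)   /* 00 00 01 XX was found */
            av_log(NULL, AV_LOG_DEBUG, "start code 0x%02X ends at offset %td\n",
                   state & 0xFF, p - buf - 1);
    }
}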
00175 
00176 /* init common dct for both encoder and decoder */
00177 av_cold int ff_dct_common_init(MpegEncContext *s)
00178 {
00179     dsputil_init(&s->dsp, s->avctx);
00180 
00181     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
00182     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
00183     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
00184     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
00185     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
00186     if (s->flags & CODEC_FLAG_BITEXACT)
00187         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
00188     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
00189 
00190 #if HAVE_MMX
00191     MPV_common_init_mmx(s);
00192 #elif ARCH_ALPHA
00193     MPV_common_init_axp(s);
00194 #elif CONFIG_MLIB
00195     MPV_common_init_mlib(s);
00196 #elif HAVE_MMI
00197     MPV_common_init_mmi(s);
00198 #elif ARCH_ARM
00199     MPV_common_init_arm(s);
00200 #elif HAVE_ALTIVEC
00201     MPV_common_init_altivec(s);
00202 #elif ARCH_BFIN
00203     MPV_common_init_bfin(s);
00204 #endif
00205 
00206     /* load & permute scantables
00207      * note: only wmv uses different ones
00208      */
00209     if (s->alternate_scan) {
00210         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
00211         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
00212     } else {
00213         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
00214         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
00215     }
00216     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
00217     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
00218 
00219     return 0;
00220 }
00221 
00222 void ff_copy_picture(Picture *dst, Picture *src)
00223 {
00224     *dst = *src;
00225     dst->f.type = FF_BUFFER_TYPE_COPY;
00226 }
00227 
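/**
 * Release a frame buffer.
 */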
00231 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
00232 {
00233     /* Windows Media Image codecs allocate internal buffers with different
00234      * dimensions; ignore user defined callbacks for these
00235      */
00236     if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
00237         ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
00238     else
00239         avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
00240     av_freep(&pic->f.hwaccel_picture_private);
00241 }
00242 
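/**
 * Allocate a frame buffer.
 */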
00246 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
00247 {
00248     int r;
00249 
00250     if (s->avctx->hwaccel) {
00251         assert(!pic->f.hwaccel_picture_private);
00252         if (s->avctx->hwaccel->priv_data_size) {
00253             pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
00254             if (!pic->f.hwaccel_picture_private) {
00255                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
00256                 return -1;
00257             }
00258         }
00259     }
00260 
00261     if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
00262         r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
00263     else
00264         r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
00265 
00266     if (r < 0 || !pic->f.type || !pic->f.data[0]) {
00267         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
00268                r, pic->f.type, pic->f.data[0]);
00269         av_freep(&pic->f.hwaccel_picture_private);
00270         return -1;
00271     }
00272 
00273     if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
00274                         s->uvlinesize != pic->f.linesize[1])) {
00275         av_log(s->avctx, AV_LOG_ERROR,
00276                "get_buffer() failed (stride changed)\n");
00277         free_frame_buffer(s, pic);
00278         return -1;
00279     }
00280 
00281     if (pic->f.linesize[1] != pic->f.linesize[2]) {
00282         av_log(s->avctx, AV_LOG_ERROR,
00283                "get_buffer() failed (uv stride mismatch)\n");
00284         free_frame_buffer(s, pic);
00285         return -1;
00286     }
00287 
00288     return 0;
00289 }
00290 
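/**
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0.
 */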
00295 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
00296 {
00297     const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
00298 
00299     // the + 1 is needed so memset(,,stride*height) does not sig11
00300 
00301     const int mb_array_size = s->mb_stride * s->mb_height;
00302     const int b8_array_size = s->b8_stride * s->mb_height * 2;
00303     const int b4_array_size = s->b4_stride * s->mb_height * 4;
00304     int i;
00305     int r = -1;
00306 
00307     if (shared) {
00308         assert(pic->f.data[0]);
00309         assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
00310         pic->f.type = FF_BUFFER_TYPE_SHARED;
00311     } else {
00312         assert(!pic->f.data[0]);
00313 
00314         if (alloc_frame_buffer(s, pic) < 0)
00315             return -1;
00316 
00317         s->linesize   = pic->f.linesize[0];
00318         s->uvlinesize = pic->f.linesize[1];
00319     }
00320 
00321     if (pic->f.qscale_table == NULL) {
00322         if (s->encoding) {
00323             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
00324                               mb_array_size * sizeof(int16_t), fail)
00325             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
00326                               mb_array_size * sizeof(int16_t), fail)
00327             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
00328                               mb_array_size * sizeof(int8_t ), fail)
00329         }
00330 
00331         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
00332                           mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
00333         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
00334                           (big_mb_num + s->mb_stride) * sizeof(uint8_t),
00335                           fail)
00336         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
00337                           (big_mb_num + s->mb_stride) * sizeof(uint32_t),
00338                           fail)
00339         pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
00340         pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
00341         if (s->out_format == FMT_H264) {
00342             for (i = 0; i < 2; i++) {
00343                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00344                                   2 * (b4_array_size + 4) * sizeof(int16_t),
00345                                   fail)
00346                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00347                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00348                                   4 * mb_array_size * sizeof(uint8_t), fail)
00349             }
00350             pic->f.motion_subsample_log2 = 2;
00351         } else if (s->out_format == FMT_H263 || s->encoding ||
00352                    (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
00353             for (i = 0; i < 2; i++) {
00354                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00355                                   2 * (b8_array_size + 4) * sizeof(int16_t),
00356                                   fail)
00357                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00358                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00359                                   4 * mb_array_size * sizeof(uint8_t), fail)
00360             }
00361             pic->f.motion_subsample_log2 = 3;
00362         }
00363         if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00364             FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
00365                               64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
00366         }
00367         pic->f.qstride = s->mb_stride;
00368         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
00369                           1 * sizeof(AVPanScan), fail)
00370     }
00371 
00372     pic->owner2 = s;
00373 
00374     return 0;
00375 fail: // for the FF_ALLOCZ_OR_GOTO macro
00376     if (r >= 0)
00377         free_frame_buffer(s, pic);
00378     return -1;
00379 }
00380 
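/**
 * Deallocate a picture.
 */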
00384 static void free_picture(MpegEncContext *s, Picture *pic)
00385 {
00386     int i;
00387 
00388     if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
00389         free_frame_buffer(s, pic);
00390     }
00391 
00392     av_freep(&pic->mb_var);
00393     av_freep(&pic->mc_mb_var);
00394     av_freep(&pic->mb_mean);
00395     av_freep(&pic->f.mbskip_table);
00396     av_freep(&pic->qscale_table_base);
00397     av_freep(&pic->mb_type_base);
00398     av_freep(&pic->f.dct_coeff);
00399     av_freep(&pic->f.pan_scan);
00400     pic->f.mb_type = NULL;
00401     for (i = 0; i < 2; i++) {
00402         av_freep(&pic->motion_val_base[i]);
00403         av_freep(&pic->f.ref_index[i]);
00404     }
00405 
00406     if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
00407         for (i = 0; i < 4; i++) {
00408             pic->f.base[i] =
00409             pic->f.data[i] = NULL;
00410         }
00411         pic->f.type = 0;
00412     }
00413 }
00414 
00415 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
00416 {
00417     int y_size = s->b8_stride * (2 * s->mb_height + 1);
00418     int c_size = s->mb_stride * (s->mb_height + 1);
00419     int yc_size = y_size + 2 * c_size;
00420     int i;
00421 
00422     // edge emu needs blocksize + filter length - 1
00423     // (= 17x17 for halfpel / 21x21 for h264)
00424     FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
00425                       (s->width + 64) * 2 * 21 * 2, fail);    // (width + edge + align)*interlaced*MBsize*tolerance
00426 
00427     // FIXME should be linesize instead of s->width * 2
00428     // but that is not known before get_buffer()
00429     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
00430                       (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
00431     s->me.temp         = s->me.scratchpad;
00432     s->rd_scratchpad   = s->me.scratchpad;
00433     s->b_scratchpad    = s->me.scratchpad;
00434     s->obmc_scratchpad = s->me.scratchpad + 16;
00435     if (s->encoding) {
00436         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
00437                           ME_MAP_SIZE * sizeof(uint32_t), fail)
00438         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
00439                           ME_MAP_SIZE * sizeof(uint32_t), fail)
00440         if (s->avctx->noise_reduction) {
00441             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
00442                               2 * 64 * sizeof(int), fail)
00443         }
00444     }
00445     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
00446     s->block = s->blocks[0];
00447 
00448     for (i = 0; i < 12; i++) {
00449         s->pblocks[i] = &s->block[i];
00450     }
00451 
00452     if (s->out_format == FMT_H263) {
00453         /* ac values */
00454         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
00455                           yc_size * sizeof(int16_t) * 16, fail);
00456         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
00457         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
00458         s->ac_val[2] = s->ac_val[1] + c_size;
00459     }
00460 
00461     return 0;
00462 fail:
00463     return -1; // free() through MPV_common_end()
00464 }
00465 
00466 static void free_duplicate_context(MpegEncContext *s)
00467 {
00468     if (s == NULL)
00469         return;
00470 
00471     av_freep(&s->edge_emu_buffer);
00472     av_freep(&s->me.scratchpad);
00473     s->me.temp =
00474     s->rd_scratchpad =
00475     s->b_scratchpad =
00476     s->obmc_scratchpad = NULL;
00477 
00478     av_freep(&s->dct_error_sum);
00479     av_freep(&s->me.map);
00480     av_freep(&s->me.score_map);
00481     av_freep(&s->blocks);
00482     av_freep(&s->ac_val_base);
00483     s->block = NULL;
00484 }
00485 
00486 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
00487 {
00488 #define COPY(a) bak->a = src->a
00489     COPY(edge_emu_buffer);
00490     COPY(me.scratchpad);
00491     COPY(me.temp);
00492     COPY(rd_scratchpad);
00493     COPY(b_scratchpad);
00494     COPY(obmc_scratchpad);
00495     COPY(me.map);
00496     COPY(me.score_map);
00497     COPY(blocks);
00498     COPY(block);
00499     COPY(start_mb_y);
00500     COPY(end_mb_y);
00501     COPY(me.map_generation);
00502     COPY(pb);
00503     COPY(dct_error_sum);
00504     COPY(dct_count[0]);
00505     COPY(dct_count[1]);
00506     COPY(ac_val_base);
00507     COPY(ac_val[0]);
00508     COPY(ac_val[1]);
00509     COPY(ac_val[2]);
00510 #undef COPY
00511 }
00512 
00513 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
00514 {
00515     MpegEncContext bak;
00516     int i;
00517     // FIXME copy only needed parts
00518     // START_TIMER
00519     backup_duplicate_context(&bak, dst);
00520     memcpy(dst, src, sizeof(MpegEncContext));
00521     backup_duplicate_context(dst, &bak);
00522     for (i = 0; i < 12; i++) {
00523         dst->pblocks[i] = &dst->block[i];
00524     }
00525     // STOP_TIMER("update_duplicate_context")
00526     // about 10k cycles / 0.01 sec for 1000 frames on 1 GHz with 2 threads
00527 }
00528 
00529 int ff_mpeg_update_thread_context(AVCodecContext *dst,
00530                                   const AVCodecContext *src)
00531 {
00532     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
00533 
00534     if (dst == src || !s1->context_initialized)
00535         return 0;
00536 
00537     // FIXME can parameters change on I-frames?
00538     // in that case dst may need a reinit
00539     if (!s->context_initialized) {
00540         memcpy(s, s1, sizeof(MpegEncContext));
00541 
00542         s->avctx                 = dst;
00543         s->picture_range_start  += MAX_PICTURE_COUNT;
00544         s->picture_range_end    += MAX_PICTURE_COUNT;
00545         s->bitstream_buffer      = NULL;
00546         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
00547 
00548         MPV_common_init(s);
00549     }
00550 
00551     s->avctx->coded_height  = s1->avctx->coded_height;
00552     s->avctx->coded_width   = s1->avctx->coded_width;
00553     s->avctx->width         = s1->avctx->width;
00554     s->avctx->height        = s1->avctx->height;
00555 
00556     s->coded_picture_number = s1->coded_picture_number;
00557     s->picture_number       = s1->picture_number;
00558     s->input_picture_number = s1->input_picture_number;
00559 
00560     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
00561     memcpy(&s->last_picture, &s1->last_picture,
00562            (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
00563 
00564     s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
00565     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
00566     s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
00567 
00568     // Error/bug resilience
00569     s->next_p_frame_damaged = s1->next_p_frame_damaged;
00570     s->workaround_bugs      = s1->workaround_bugs;
00571 
00572     // MPEG4 timing info
00573     memcpy(&s->time_increment_bits, &s1->time_increment_bits,
00574            (char *) &s1->shape - (char *) &s1->time_increment_bits);
00575 
00576     // B-frame info
00577     s->max_b_frames = s1->max_b_frames;
00578     s->low_delay    = s1->low_delay;
00579     s->dropable     = s1->dropable;
00580 
00581     // DivX handling (doesn't work)
00582     s->divx_packed  = s1->divx_packed;
00583 
00584     if (s1->bitstream_buffer) {
00585         if (s1->bitstream_buffer_size +
00586             FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
00587             av_fast_malloc(&s->bitstream_buffer,
00588                            &s->allocated_bitstream_buffer_size,
00589                            s1->allocated_bitstream_buffer_size);
00590         s->bitstream_buffer_size = s1->bitstream_buffer_size;
00591         memcpy(s->bitstream_buffer, s1->bitstream_buffer,
00592                s1->bitstream_buffer_size);
00593         memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
00594                FF_INPUT_BUFFER_PADDING_SIZE);
00595     }
00596 
00597     // MPEG2/interlacing info
00598     memcpy(&s->progressive_sequence, &s1->progressive_sequence,
00599            (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
00600 
00601     if (!s1->first_field) {
00602         s->last_pict_type = s1->pict_type;
00603         if (s1->current_picture_ptr)
00604             s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
00605 
00606         if (s1->pict_type != AV_PICTURE_TYPE_B) {
00607             s->last_non_b_pict_type = s1->pict_type;
00608         }
00609     }
00610 
00611     return 0;
00612 }
00613 
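/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */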
00620 void MPV_common_defaults(MpegEncContext *s)
00621 {
00622     s->y_dc_scale_table      =
00623     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
00624     s->chroma_qscale_table   = ff_default_chroma_qscale_table;
00625     s->progressive_frame     = 1;
00626     s->progressive_sequence  = 1;
00627     s->picture_structure     = PICT_FRAME;
00628 
00629     s->coded_picture_number  = 0;
00630     s->picture_number        = 0;
00631     s->input_picture_number  = 0;
00632 
00633     s->picture_in_gop_number = 0;
00634 
00635     s->f_code                = 1;
00636     s->b_code                = 1;
00637 
00638     s->picture_range_start   = 0;
00639     s->picture_range_end     = MAX_PICTURE_COUNT;
00640 
00641     s->slice_context_count   = 1;
00642 }
00643 
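/**
 * Set the given MpegEncContext to defaults for decoding.
 * The changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */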
00649 void MPV_decode_defaults(MpegEncContext *s)
00650 {
00651     MPV_common_defaults(s);
00652 }
00653 
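/**
 * Init the common structure for both encoder and decoder.
 * This assumes that some variables like width/height are already set.
 */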
00658 av_cold int MPV_common_init(MpegEncContext *s)
00659 {
00660     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
00661     int nb_slices = (HAVE_THREADS &&
00662                      s->avctx->active_thread_type & FF_THREAD_SLICE) ?
00663                     s->avctx->thread_count : 1;
00664 
00665     if (s->encoding && s->avctx->slices)
00666         nb_slices = s->avctx->slices;
00667 
00668     if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
00669         s->mb_height = (s->height + 31) / 32 * 2;
00670     else if (s->codec_id != CODEC_ID_H264)
00671         s->mb_height = (s->height + 15) / 16;
00672 
00673     if (s->avctx->pix_fmt == PIX_FMT_NONE) {
00674         av_log(s->avctx, AV_LOG_ERROR,
00675                "decoding to PIX_FMT_NONE is not supported.\n");
00676         return -1;
00677     }
00678 
00679     if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
00680         int max_slices;
00681         if (s->mb_height)
00682             max_slices = FFMIN(MAX_THREADS, s->mb_height);
00683         else
00684             max_slices = MAX_THREADS;
00685         av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
00686                " reducing to %d\n", nb_slices, max_slices);
00687         nb_slices = max_slices;
00688     }
00689 
00690     if ((s->width || s->height) &&
00691         av_image_check_size(s->width, s->height, 0, s->avctx))
00692         return -1;
00693 
00694     ff_dct_common_init(s);
00695 
00696     s->flags  = s->avctx->flags;
00697     s->flags2 = s->avctx->flags2;
00698 
00699     if (s->width && s->height) {
00700         s->mb_width   = (s->width + 15) / 16;
00701         s->mb_stride  = s->mb_width + 1;
00702         s->b8_stride  = s->mb_width * 2 + 1;
00703         s->b4_stride  = s->mb_width * 4 + 1;
00704         mb_array_size = s->mb_height * s->mb_stride;
00705         mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
00706 
00707         /* set chroma shifts */
00708         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
00709                                       &s->chroma_y_shift);
00710 
00711         /* set default edge pos, will be overridden
00712          * in decode_header if needed */
00713         s->h_edge_pos = s->mb_width * 16;
00714         s->v_edge_pos = s->mb_height * 16;
00715 
00716         s->mb_num     = s->mb_width * s->mb_height;
00717 
00718         s->block_wrap[0] =
00719         s->block_wrap[1] =
00720         s->block_wrap[2] =
00721         s->block_wrap[3] = s->b8_stride;
00722         s->block_wrap[4] =
00723         s->block_wrap[5] = s->mb_stride;
00724 
00725         y_size  = s->b8_stride * (2 * s->mb_height + 1);
00726         c_size  = s->mb_stride * (s->mb_height + 1);
00727         yc_size = y_size + 2   * c_size;
00728 
00729         /* convert fourcc to upper case */
00730         s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
00731 
00732         s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);
00733 
00734         s->avctx->coded_frame = (AVFrame *)&s->current_picture;
00735 
00736         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
00737                           fail); // error resilience code looks cleaner with this
00738         for (y = 0; y < s->mb_height; y++)
00739             for (x = 0; x < s->mb_width; x++)
00740                 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
00741 
00742         s->mb_index2xy[s->mb_height * s->mb_width] =
00743                        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
00744 
00745         if (s->encoding) {
00746             /* Allocate MV tables */
00747             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
00748                               mv_table_size * 2 * sizeof(int16_t), fail);
00749             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
00750                               mv_table_size * 2 * sizeof(int16_t), fail);
00751             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
00752                               mv_table_size * 2 * sizeof(int16_t), fail);
00753             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
00754                               mv_table_size * 2 * sizeof(int16_t), fail);
00755             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
00756                               mv_table_size * 2 * sizeof(int16_t), fail);
00757             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
00758                               mv_table_size * 2 * sizeof(int16_t), fail);
00759             s->p_mv_table            = s->p_mv_table_base +
00760                                        s->mb_stride + 1;
00761             s->b_forw_mv_table       = s->b_forw_mv_table_base +
00762                                        s->mb_stride + 1;
00763             s->b_back_mv_table       = s->b_back_mv_table_base +
00764                                        s->mb_stride + 1;
00765             s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
00766                                        s->mb_stride + 1;
00767             s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
00768                                        s->mb_stride + 1;
00769             s->b_direct_mv_table     = s->b_direct_mv_table_base +
00770                                        s->mb_stride + 1;
00771 
00772             if (s->msmpeg4_version) {
00773                 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
00774                                   2 * 2 * (MAX_LEVEL + 1) *
00775                                   (MAX_RUN + 1) * 2 * sizeof(int), fail);
00776             }
00777             FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
00778 
00779             /* Allocate MB type table */
00780             FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
00781                               sizeof(uint16_t), fail); // needed for encoding
00782 
00783             FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
00784                               sizeof(int), fail);
00785 
00786             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
00787                               64 * 32   * sizeof(int), fail);
00788             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
00789                               64 * 32   * sizeof(int), fail);
00790             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
00791                               64 * 32 * 2 * sizeof(uint16_t), fail);
00792             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
00793                               64 * 32 * 2 * sizeof(uint16_t), fail);
00794             FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
00795                               MAX_PICTURE_COUNT * sizeof(Picture *), fail);
00796             FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
00797                               MAX_PICTURE_COUNT * sizeof(Picture *), fail);
00798 
00799             if (s->avctx->noise_reduction) {
00800                 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
00801                                   2 * 64 * sizeof(uint16_t), fail);
00802             }
00803         }
00804     }
00805 
00806     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
00807     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
00808                       s->picture_count * sizeof(Picture), fail);
00809     for (i = 0; i < s->picture_count; i++) {
00810         avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
00811     }
00812 
00813     if (s->width && s->height) {
00814         FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
00815                           mb_array_size * sizeof(uint8_t), fail);
00816 
00817         if (s->codec_id == CODEC_ID_MPEG4 ||
00818             (s->flags & CODEC_FLAG_INTERLACED_ME)) {
00819             /* interlaced direct mode decoding tables */
00820             for (i = 0; i < 2; i++) {
00821                 int j, k;
00822                 for (j = 0; j < 2; j++) {
00823                     for (k = 0; k < 2; k++) {
00824                         FF_ALLOCZ_OR_GOTO(s->avctx,
00825                                           s->b_field_mv_table_base[i][j][k],
00826                                           mv_table_size * 2 * sizeof(int16_t),
00827                                           fail);
00828                         s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
00829                                                        s->mb_stride + 1;
00830                     }
00831                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
00832                                       mb_array_size * 2 * sizeof(uint8_t),
00833                                       fail);
00834                     FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
00835                                       mv_table_size * 2 * sizeof(int16_t),
00836                                       fail);
00837                     s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
00838                                                 + s->mb_stride + 1;
00839                 }
00840                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
00841                                   mb_array_size * 2 * sizeof(uint8_t),
00842                                   fail);
00843             }
00844         }
00845         if (s->out_format == FMT_H263) {
00846             /* cbp values */
00847             FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
00848             s->coded_block = s->coded_block_base + s->b8_stride + 1;
00849 
00850             /* cbp, ac_pred, pred_dir */
00851             FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
00852                               mb_array_size * sizeof(uint8_t), fail);
00853             FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
00854                               mb_array_size * sizeof(uint8_t), fail);
00855         }
00856 
00857         if (s->h263_pred || s->h263_plus || !s->encoding) {
00858             /* dc values */
00859             // MN: we need these for error resilience of intra-frames
00860             FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
00861                               yc_size * sizeof(int16_t), fail);
00862             s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
00863             s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
00864             s->dc_val[2] = s->dc_val[1] + c_size;
00865             for (i = 0; i < yc_size; i++)
00866                 s->dc_val_base[i] = 1024;
00867         }
00868 
00869         /* which mb is an intra block */
00870         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
00871         memset(s->mbintra_table, 1, mb_array_size);
00872 
00873         /* init macroblock skip table */
00874         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
00875         // Note the + 1 is for a quicker mpeg4 slice_end detection
00876 
00877         s->parse_context.state = -1;
00878         if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
00879             s->avctx->debug_mv) {
00880             s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
00881                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
00882             s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
00883                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
00884             s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
00885                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
00886         }
00887     }
00888 
00889     s->context_initialized = 1;
00890     s->thread_context[0]   = s;
00891 
00892     if (s->width && s->height) {
00893         if (nb_slices > 1) {
00894             for (i = 1; i < nb_slices; i++) {
00895                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
00896                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
00897             }
00898 
00899             for (i = 0; i < nb_slices; i++) {
00900                 if (init_duplicate_context(s->thread_context[i], s) < 0)
00901                     goto fail;
00902                 s->thread_context[i]->start_mb_y =
00903                     (s->mb_height * (i) + nb_slices / 2) / nb_slices;
00904                 s->thread_context[i]->end_mb_y   =
00905                     (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
00906             }
00907         } else {
00908             if (init_duplicate_context(s, s) < 0)
00909                 goto fail;
00910             s->start_mb_y = 0;
00911             s->end_mb_y   = s->mb_height;
00912         }
00913         s->slice_context_count = nb_slices;
00914     }
00915 
00916     return 0;
00917  fail:
00918     MPV_common_end(s);
00919     return -1;
00920 }
00921 
00922 /* init common structure for both encoder and decoder */
00923 void MPV_common_end(MpegEncContext *s)
00924 {
00925     int i, j, k;
00926 
00927     if (s->slice_context_count > 1) {
00928         for (i = 0; i < s->slice_context_count; i++) {
00929             free_duplicate_context(s->thread_context[i]);
00930         }
00931         for (i = 1; i < s->slice_context_count; i++) {
00932             av_freep(&s->thread_context[i]);
00933         }
00934         s->slice_context_count = 1;
00935     } else free_duplicate_context(s);
00936 
00937     av_freep(&s->parse_context.buffer);
00938     s->parse_context.buffer_size = 0;
00939 
00940     av_freep(&s->mb_type);
00941     av_freep(&s->p_mv_table_base);
00942     av_freep(&s->b_forw_mv_table_base);
00943     av_freep(&s->b_back_mv_table_base);
00944     av_freep(&s->b_bidir_forw_mv_table_base);
00945     av_freep(&s->b_bidir_back_mv_table_base);
00946     av_freep(&s->b_direct_mv_table_base);
00947     s->p_mv_table            = NULL;
00948     s->b_forw_mv_table       = NULL;
00949     s->b_back_mv_table       = NULL;
00950     s->b_bidir_forw_mv_table = NULL;
00951     s->b_bidir_back_mv_table = NULL;
00952     s->b_direct_mv_table     = NULL;
00953     for (i = 0; i < 2; i++) {
00954         for (j = 0; j < 2; j++) {
00955             for (k = 0; k < 2; k++) {
00956                 av_freep(&s->b_field_mv_table_base[i][j][k]);
00957                 s->b_field_mv_table[i][j][k] = NULL;
00958             }
00959             av_freep(&s->b_field_select_table[i][j]);
00960             av_freep(&s->p_field_mv_table_base[i][j]);
00961             s->p_field_mv_table[i][j] = NULL;
00962         }
00963         av_freep(&s->p_field_select_table[i]);
00964     }
00965 
00966     av_freep(&s->dc_val_base);
00967     av_freep(&s->coded_block_base);
00968     av_freep(&s->mbintra_table);
00969     av_freep(&s->cbp_table);
00970     av_freep(&s->pred_dir_table);
00971 
00972     av_freep(&s->mbskip_table);
00973     av_freep(&s->bitstream_buffer);
00974     s->allocated_bitstream_buffer_size = 0;
00975 
00976     av_freep(&s->avctx->stats_out);
00977     av_freep(&s->ac_stats);
00978     av_freep(&s->error_status_table);
00979     av_freep(&s->mb_index2xy);
00980     av_freep(&s->lambda_table);
00981     av_freep(&s->q_intra_matrix);
00982     av_freep(&s->q_inter_matrix);
00983     av_freep(&s->q_intra_matrix16);
00984     av_freep(&s->q_inter_matrix16);
00985     av_freep(&s->input_picture);
00986     av_freep(&s->reordered_input_picture);
00987     av_freep(&s->dct_offset);
00988 
00989     if (s->picture && !s->avctx->internal->is_copy) {
00990         for (i = 0; i < s->picture_count; i++) {
00991             free_picture(s, &s->picture[i]);
00992         }
00993     }
00994     av_freep(&s->picture);
00995     s->context_initialized      = 0;
00996     s->last_picture_ptr         =
00997     s->next_picture_ptr         =
00998     s->current_picture_ptr      = NULL;
00999     s->linesize = s->uvlinesize = 0;
01000 
01001     for (i = 0; i < 3; i++)
01002         av_freep(&s->visualization_buffer[i]);
01003 
01004     if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
01005         avcodec_default_free_buffers(s->avctx);
01006 }
01007 
01008 void init_rl(RLTable *rl,
01009              uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
01010 {
01011     int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
01012     uint8_t index_run[MAX_RUN + 1];
01013     int last, run, level, start, end, i;
01014 
01015     /* If table is static, we can quit if rl->max_level[0] is not NULL */
01016     if (static_store && rl->max_level[0])
01017         return;
01018 
01019     /* compute max_level[], max_run[] and index_run[] */
01020     for (last = 0; last < 2; last++) {
01021         if (last == 0) {
01022             start = 0;
01023             end = rl->last;
01024         } else {
01025             start = rl->last;
01026             end = rl->n;
01027         }
01028 
01029         memset(max_level, 0, MAX_RUN + 1);
01030         memset(max_run, 0, MAX_LEVEL + 1);
01031         memset(index_run, rl->n, MAX_RUN + 1);
01032         for (i = start; i < end; i++) {
01033             run   = rl->table_run[i];
01034             level = rl->table_level[i];
01035             if (index_run[run] == rl->n)
01036                 index_run[run] = i;
01037             if (level > max_level[run])
01038                 max_level[run] = level;
01039             if (run > max_run[level])
01040                 max_run[level] = run;
01041         }
01042         if (static_store)
01043             rl->max_level[last] = static_store[last];
01044         else
01045             rl->max_level[last] = av_malloc(MAX_RUN + 1);
01046         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
01047         if (static_store)
01048             rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
01049         else
01050             rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
01051         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
01052         if (static_store)
01053             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
01054         else
01055             rl->index_run[last] = av_malloc(MAX_RUN + 1);
01056         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
01057     }
01058 }
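/* A hedged usage sketch (not part of the original file): initializing an RLTable
 * with static storage so init_rl() never allocates and repeated calls return
 * early. "my_rl" and "my_rl_store" are hypothetical names; a real table also
 * needs its table_vlc/table_run/table_level arrays filled in. */
static uint8_t my_rl_store[2][2 * MAX_RUN + MAX_LEVEL + 3];

static void init_my_rl(RLTable *my_rl)
{
    init_rl(my_rl, my_rl_store);   /* max_level/max_run/index_run end up in my_rl_store */
}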
01059 
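/* init_vlc_rl() below expands rl->vlc into per-qscale RL_VLC_ELEM tables with the
 * dequantization level * qmul + qadd (qmul = 2 * q, qadd = (q - 1) | 1, the H.263
 * family scheme) already folded into the stored level, so the bitstream reader
 * avoids one multiply and add per coefficient. */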
01060 void init_vlc_rl(RLTable *rl)
01061 {
01062     int i, q;
01063 
01064     for (q = 0; q < 32; q++) {
01065         int qmul = q * 2;
01066         int qadd = (q - 1) | 1;
01067 
01068         if (q == 0) {
01069             qmul = 1;
01070             qadd = 0;
01071         }
01072         for (i = 0; i < rl->vlc.table_size; i++) {
01073             int code = rl->vlc.table[i][0];
01074             int len  = rl->vlc.table[i][1];
01075             int level, run;
01076 
01077             if (len == 0) { // illegal code
01078                 run   = 66;
01079                 level = MAX_LEVEL;
01080             } else if (len < 0) { // more bits needed
01081                 run   = 0;
01082                 level = code;
01083             } else {
01084                 if (code == rl->n) { // esc
01085                     run   = 66;
01086                     level =  0;
01087                 } else {
01088                     run   = rl->table_run[code] + 1;
01089                     level = rl->table_level[code] * qmul + qadd;
01090                     if (code >= rl->last) run += 192;
01091                 }
01092             }
01093             rl->rl_vlc[q][i].len   = len;
01094             rl->rl_vlc[q][i].level = level;
01095             rl->rl_vlc[q][i].run   = run;
01096         }
01097     }
01098 }
01099 
01100 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
01101 {
01102     int i;
01103 
01104     /* release non reference frames */
01105     for (i = 0; i < s->picture_count; i++) {
01106         if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
01107             (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
01108             (remove_current || &s->picture[i] !=  s->current_picture_ptr)
01109             /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
01110             free_frame_buffer(s, &s->picture[i]);
01111         }
01112     }
01113 }
01114 
01115 int ff_find_unused_picture(MpegEncContext *s, int shared)
01116 {
01117     int i;
01118 
01119     if (shared) {
01120         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01121             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
01122                 return i;
01123         }
01124     } else {
01125         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01126             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
01127                 return i; // FIXME
01128         }
01129         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01130             if (s->picture[i].f.data[0] == NULL)
01131                 return i;
01132         }
01133     }
01134 
01135     return AVERROR_INVALIDDATA;
01136 }
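/* A hedged usage sketch (not part of the original file): ff_find_unused_picture()
 * returns an index into s->picture on success and a negative AVERROR code when no
 * slot in [picture_range_start, picture_range_end) is free, so callers should
 * check the result before indexing. "grab_unused_picture" is a hypothetical helper. */
static Picture *grab_unused_picture(MpegEncContext *s)
{
    int idx = ff_find_unused_picture(s, 0);
    if (idx < 0)
        return NULL;   /* no free slot; the caller must handle the error */
    return &s->picture[idx];
}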
01137 
01138 static void update_noise_reduction(MpegEncContext *s)
01139 {
01140     int intra, i;
01141 
01142     for (intra = 0; intra < 2; intra++) {
01143         if (s->dct_count[intra] > (1 << 16)) {
01144             for (i = 0; i < 64; i++) {
01145                 s->dct_error_sum[intra][i] >>= 1;
01146             }
01147             s->dct_count[intra] >>= 1;
01148         }
01149 
01150         for (i = 0; i < 64; i++) {
01151             s->dct_offset[intra][i] = (s->avctx->noise_reduction *
01152                                        s->dct_count[intra] +
01153                                        s->dct_error_sum[intra][i] / 2) /
01154                                       (s->dct_error_sum[intra][i] + 1);
01155         }
01156     }
01157 }
01158 
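/**
 * Generic function for encode/decode called after coding/decoding
 * the header and before a frame is coded/decoded.
 */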
01163 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
01164 {
01165     int i;
01166     Picture *pic;
01167     s->mb_skipped = 0;
01168 
01169     assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
01170            s->codec_id == CODEC_ID_SVQ3);
01171 
01172     /* mark & release old frames */
01173     if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
01174         if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
01175             s->last_picture_ptr != s->next_picture_ptr &&
01176             s->last_picture_ptr->f.data[0]) {
01177             if (s->last_picture_ptr->owner2 == s)
01178                 free_frame_buffer(s, s->last_picture_ptr);
01179         }
01180 
01181         /* release forgotten pictures */
01182         /* if (mpeg124/h263) */
01183         if (!s->encoding) {
01184             for (i = 0; i < s->picture_count; i++) {
01185                 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
01186                     &s->picture[i] != s->last_picture_ptr &&
01187                     &s->picture[i] != s->next_picture_ptr &&
01188                     s->picture[i].f.reference) {
01189                     if (!(avctx->active_thread_type & FF_THREAD_FRAME))
01190                         av_log(avctx, AV_LOG_ERROR,
01191                                "releasing zombie picture\n");
01192                     free_frame_buffer(s, &s->picture[i]);
01193                 }
01194             }
01195         }
01196     }
01197 
01198     if (!s->encoding) {
01199         ff_release_unused_pictures(s, 1);
01200 
01201         if (s->current_picture_ptr &&
01202             s->current_picture_ptr->f.data[0] == NULL) {
01203             // we already have an unused image
01204             // (maybe it was set before reading the header)
01205             pic = s->current_picture_ptr;
01206         } else {
01207             i   = ff_find_unused_picture(s, 0);
01208             pic = &s->picture[i];
01209         }
01210 
01211         pic->f.reference = 0;
01212         if (!s->dropable) {
01213             if (s->codec_id == CODEC_ID_H264)
01214                 pic->f.reference = s->picture_structure;
01215             else if (s->pict_type != AV_PICTURE_TYPE_B)
01216                 pic->f.reference = 3;
01217         }
01218 
01219         pic->f.coded_picture_number = s->coded_picture_number++;
01220 
01221         if (ff_alloc_picture(s, pic, 0) < 0)
01222             return -1;
01223 
01224         s->current_picture_ptr = pic;
01225         // FIXME use only the vars from current_pic
01226         s->current_picture_ptr->f.top_field_first = s->top_field_first;
01227         if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
01228             s->codec_id == CODEC_ID_MPEG2VIDEO) {
01229             if (s->picture_structure != PICT_FRAME)
01230                 s->current_picture_ptr->f.top_field_first =
01231                     (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
01232         }
01233         s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
01234                                                      !s->progressive_sequence;
01235         s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
01236     }
01237 
01238     s->current_picture_ptr->f.pict_type = s->pict_type;
01239     // if (s->flags && CODEC_FLAG_QSCALE)
01240     //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
01241     s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
01242 
01243     ff_copy_picture(&s->current_picture, s->current_picture_ptr);
01244 
01245     if (s->pict_type != AV_PICTURE_TYPE_B) {
01246         s->last_picture_ptr = s->next_picture_ptr;
01247         if (!s->dropable)
01248             s->next_picture_ptr = s->current_picture_ptr;
01249     }
01250     /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
01251            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
01252            s->last_picture_ptr    ? s->last_picture_ptr->f.data[0]    : NULL,
01253            s->next_picture_ptr    ? s->next_picture_ptr->f.data[0]    : NULL,
01254            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
01255            s->pict_type, s->dropable); */
01256 
01257     if (s->codec_id != CODEC_ID_H264) {
01258         if ((s->last_picture_ptr == NULL ||
01259              s->last_picture_ptr->f.data[0] == NULL) &&
01260             (s->pict_type != AV_PICTURE_TYPE_I ||
01261              s->picture_structure != PICT_FRAME)) {
01262             if (s->pict_type != AV_PICTURE_TYPE_I)
01263                 av_log(avctx, AV_LOG_ERROR,
01264                        "warning: first frame is no keyframe\n");
01265             else if (s->picture_structure != PICT_FRAME)
01266                 av_log(avctx, AV_LOG_INFO,
01267                        "allocate dummy last picture for field based first keyframe\n");
01268 
01269             /* Allocate a dummy frame */
01270             i = ff_find_unused_picture(s, 0);
01271             s->last_picture_ptr = &s->picture[i];
01272             if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
01273                 return -1;
01274             ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
01275                                       INT_MAX, 0);
01276             ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
01277                                       INT_MAX, 1);
01278         }
01279         if ((s->next_picture_ptr == NULL ||
01280              s->next_picture_ptr->f.data[0] == NULL) &&
01281             s->pict_type == AV_PICTURE_TYPE_B) {
01282             /* Allocate a dummy frame */
01283             i = ff_find_unused_picture(s, 0);
01284             s->next_picture_ptr = &s->picture[i];
01285             if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
01286                 return -1;
01287             ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
01288                                       INT_MAX, 0);
01289             ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
01290                                       INT_MAX, 1);
01291         }
01292     }
01293 
01294     if (s->last_picture_ptr)
01295         ff_copy_picture(&s->last_picture, s->last_picture_ptr);
01296     if (s->next_picture_ptr)
01297         ff_copy_picture(&s->next_picture, s->next_picture_ptr);
01298 
01299     if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
01300         (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
01301         if (s->next_picture_ptr)
01302             s->next_picture_ptr->owner2 = s;
01303         if (s->last_picture_ptr)
01304             s->last_picture_ptr->owner2 = s;
01305     }
01306 
01307     assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
01308                                                  s->last_picture_ptr->f.data[0]));
01309 
01310     if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
01311         int i;
01312         for (i = 0; i < 4; i++) {
01313             if (s->picture_structure == PICT_BOTTOM_FIELD) {
01314                 s->current_picture.f.data[i] +=
01315                     s->current_picture.f.linesize[i];
01316             }
01317             s->current_picture.f.linesize[i] *= 2;
01318             s->last_picture.f.linesize[i]    *= 2;
01319             s->next_picture.f.linesize[i]    *= 2;
01320         }
01321     }
01322 
01323     s->err_recognition = avctx->err_recognition;
01324 
01325     /* set dequantizer, we can't do it during init as
01326      * it might change for mpeg4 and we can't do it in the header
01327      * decode as init is not called for mpeg4 there yet */
01328     if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
01329         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
01330         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
01331     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
01332         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
01333         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
01334     } else {
01335         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
01336         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
01337     }
01338 
01339     if (s->dct_error_sum) {
01340         assert(s->avctx->noise_reduction && s->encoding);
01341         update_noise_reduction(s);
01342     }
01343 
01344     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
01345         return ff_xvmc_field_start(s, avctx);
01346 
01347     return 0;
01348 }
01349 
01350 /* generic function for encode/decode called after a
01351  * frame has been coded/decoded. */
01352 void MPV_frame_end(MpegEncContext *s)
01353 {
01354     int i;
01355     /* redraw edges for the frame if decoding didn't complete */
01356     // just to make sure that all data is rendered.
01357     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
01358         ff_xvmc_field_end(s);
01359     } else if ((s->error_count || s->encoding) &&
01360               !s->avctx->hwaccel &&
01361               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
01362               s->unrestricted_mv &&
01363               s->current_picture.f.reference &&
01364               !s->intra_only &&
01365               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
01366         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
01367         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
01368         s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
01369                           s->h_edge_pos, s->v_edge_pos,
01370                           EDGE_WIDTH, EDGE_WIDTH,
01371                           EDGE_TOP | EDGE_BOTTOM);
01372         s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
01373                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
01374                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
01375                           EDGE_TOP | EDGE_BOTTOM);
01376         s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
01377                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
01378                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
01379                           EDGE_TOP | EDGE_BOTTOM);
01380     }
01381 
01382     emms_c();
01383 
01384     s->last_pict_type                 = s->pict_type;
01385     s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
01386     if (s->pict_type!= AV_PICTURE_TYPE_B) {
01387         s->last_non_b_pict_type = s->pict_type;
01388     }
01389 #if 0
01390     /* copy back current_picture variables */
01391     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
01392         if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
01393             s->picture[i] = s->current_picture;
01394             break;
01395         }
01396     }
01397     assert(i < MAX_PICTURE_COUNT);
01398 #endif
01399 
01400     if (s->encoding) {
01401         /* release non-reference frames */
01402         for (i = 0; i < s->picture_count; i++) {
01403             if (s->picture[i].f.data[0] && !s->picture[i].f.reference
01404                 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
01405                 free_frame_buffer(s, &s->picture[i]);
01406             }
01407         }
01408     }
01409     // clear copies, to avoid confusion
01410 #if 0
01411     memset(&s->last_picture,    0, sizeof(Picture));
01412     memset(&s->next_picture,    0, sizeof(Picture));
01413     memset(&s->current_picture, 0, sizeof(Picture));
01414 #endif
01415     s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;
01416 
01417     if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
01418         ff_thread_report_progress((AVFrame *) s->current_picture_ptr,
01419                                   s->mb_height - 1, 0);
01420     }
01421 }
01422 
01430 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
01431                       int w, int h, int stride, int color)
01432 {
01433     int x, y, fr, f;
01434 
01435     sx = av_clip(sx, 0, w - 1);
01436     sy = av_clip(sy, 0, h - 1);
01437     ex = av_clip(ex, 0, w - 1);
01438     ey = av_clip(ey, 0, h - 1);
01439 
01440     buf[sy * stride + sx] += color;
01441 
01442     if (FFABS(ex - sx) > FFABS(ey - sy)) {
01443         if (sx > ex) {
01444             FFSWAP(int, sx, ex);
01445             FFSWAP(int, sy, ey);
01446         }
01447         buf += sx + sy * stride;
01448         ex  -= sx;
01449         f    = ((ey - sy) << 16) / ex;
01450         for (x = 0; x <= ex; x++) {
01451             y  = (x * f) >> 16;
01452             fr = (x * f) & 0xFFFF;
01453             buf[y * stride + x]       += (color * (0x10000 - fr)) >> 16;
01454             buf[(y + 1) * stride + x] += (color *            fr ) >> 16;
01455         }
01456     } else {
01457         if (sy > ey) {
01458             FFSWAP(int, sx, ex);
01459             FFSWAP(int, sy, ey);
01460         }
01461         buf += sx + sy * stride;
01462         ey  -= sy;
01463         if (ey)
01464             f  = ((ex - sx) << 16) / ey;
01465         else
01466             f = 0;
01467         for (y = 0; y <= ey; y++) {
01468             x  = (y * f) >> 16;
01469             fr = (y * f) & 0xFFFF;
01470             buf[y * stride + x]     += (color * (0x10000 - fr)) >> 16;
01471             buf[y * stride + x + 1] += (color *            fr ) >> 16;
01472         }
01473     }
01474 }
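/* The loops above step a 16.16 fixed-point coordinate along the major axis and
 * split 'color' between the two neighbouring pixels on the minor axis, a crude
 * coverage-based anti-aliasing. E.g. drawing from (0,0) to (4,1) gives
 * f = (1 << 16) / 4 = 0x4000; at x = 2 the accumulator is 0x8000, so y = 0,
 * fr = 0x8000 and half of 'color' is added at row 0 and half at row 1. */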
01475 
01483 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
01484                        int ey, int w, int h, int stride, int color)
01485 {
01486     int dx,dy;
01487 
01488     sx = av_clip(sx, -100, w + 100);
01489     sy = av_clip(sy, -100, h + 100);
01490     ex = av_clip(ex, -100, w + 100);
01491     ey = av_clip(ey, -100, h + 100);
01492 
01493     dx = ex - sx;
01494     dy = ey - sy;
01495 
01496     if (dx * dx + dy * dy > 3 * 3) {
01497         int rx =  dx + dy;
01498         int ry = -dx + dy;
01499         int length = ff_sqrt((rx * rx + ry * ry) << 8);
01500 
01501         // FIXME subpixel accuracy
01502         rx = ROUNDED_DIV(rx * 3 << 4, length);
01503         ry = ROUNDED_DIV(ry * 3 << 4, length);
01504 
01505         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
01506         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
01507     }
01508     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
01509 }
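/* The two extra draw_line() calls above form the arrow head: (rx, ry) is the
 * vector (dx, dy) rotated by 45 degrees and normalized to roughly 3 pixels
 * (ff_sqrt((rx*rx + ry*ry) << 8) is about 16 * |(rx, ry)|), and (-ry, rx) is
 * the same barb rotated a further 90 degrees; both are drawn from the start
 * point, so the head marks where the vector originates. */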
01510 
01514 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
01515 {
01516     if (s->avctx->hwaccel || !pict || !pict->mb_type)
01517         return;
01518 
01519     if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
01520         int x,y;
01521 
01522         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
01523         switch (pict->pict_type) {
01524         case AV_PICTURE_TYPE_I:
01525             av_log(s->avctx,AV_LOG_DEBUG,"I\n");
01526             break;
01527         case AV_PICTURE_TYPE_P:
01528             av_log(s->avctx,AV_LOG_DEBUG,"P\n");
01529             break;
01530         case AV_PICTURE_TYPE_B:
01531             av_log(s->avctx,AV_LOG_DEBUG,"B\n");
01532             break;
01533         case AV_PICTURE_TYPE_S:
01534             av_log(s->avctx,AV_LOG_DEBUG,"S\n");
01535             break;
01536         case AV_PICTURE_TYPE_SI:
01537             av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
01538             break;
01539         case AV_PICTURE_TYPE_SP:
01540             av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
01541             break;
01542         }
01543         for (y = 0; y < s->mb_height; y++) {
01544             for (x = 0; x < s->mb_width; x++) {
01545                 if (s->avctx->debug & FF_DEBUG_SKIP) {
01546                     int count = s->mbskip_table[x + y * s->mb_stride];
01547                     if (count > 9)
01548                         count = 9;
01549                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
01550                 }
01551                 if (s->avctx->debug & FF_DEBUG_QP) {
01552                     av_log(s->avctx, AV_LOG_DEBUG, "%2d",
01553                            pict->qscale_table[x + y * s->mb_stride]);
01554                 }
01555                 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
01556                     int mb_type = pict->mb_type[x + y * s->mb_stride];
01557                     // Type & MV direction
01558                     if (IS_PCM(mb_type))
01559                         av_log(s->avctx, AV_LOG_DEBUG, "P");
01560                     else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
01561                         av_log(s->avctx, AV_LOG_DEBUG, "A");
01562                     else if (IS_INTRA4x4(mb_type))
01563                         av_log(s->avctx, AV_LOG_DEBUG, "i");
01564                     else if (IS_INTRA16x16(mb_type))
01565                         av_log(s->avctx, AV_LOG_DEBUG, "I");
01566                     else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
01567                         av_log(s->avctx, AV_LOG_DEBUG, "d");
01568                     else if (IS_DIRECT(mb_type))
01569                         av_log(s->avctx, AV_LOG_DEBUG, "D");
01570                     else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
01571                         av_log(s->avctx, AV_LOG_DEBUG, "g");
01572                     else if (IS_GMC(mb_type))
01573                         av_log(s->avctx, AV_LOG_DEBUG, "G");
01574                     else if (IS_SKIP(mb_type))
01575                         av_log(s->avctx, AV_LOG_DEBUG, "S");
01576                     else if (!USES_LIST(mb_type, 1))
01577                         av_log(s->avctx, AV_LOG_DEBUG, ">");
01578                     else if (!USES_LIST(mb_type, 0))
01579                         av_log(s->avctx, AV_LOG_DEBUG, "<");
01580                     else {
01581                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01582                         av_log(s->avctx, AV_LOG_DEBUG, "X");
01583                     }
01584 
01585                     // segmentation
01586                     if (IS_8X8(mb_type))
01587                         av_log(s->avctx, AV_LOG_DEBUG, "+");
01588                     else if (IS_16X8(mb_type))
01589                         av_log(s->avctx, AV_LOG_DEBUG, "-");
01590                     else if (IS_8X16(mb_type))
01591                         av_log(s->avctx, AV_LOG_DEBUG, "|");
01592                     else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
01593                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01594                     else
01595                         av_log(s->avctx, AV_LOG_DEBUG, "?");
01596 
01597 
01598                     if (IS_INTERLACED(mb_type))
01599                         av_log(s->avctx, AV_LOG_DEBUG, "=");
01600                     else
01601                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01602                 }
01603                 // av_log(s->avctx, AV_LOG_DEBUG, " ");
01604             }
01605             av_log(s->avctx, AV_LOG_DEBUG, "\n");
01606         }
01607     }
01608 
01609     if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
01610         (s->avctx->debug_mv)) {
01611         const int shift = 1 + s->quarter_sample;
01612         int mb_y;
01613         uint8_t *ptr;
01614         int i;
01615         int h_chroma_shift, v_chroma_shift, block_height;
01616         const int width          = s->avctx->width;
01617         const int height         = s->avctx->height;
01618         const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
01619         const int mv_stride      = (s->mb_width << mv_sample_log2) +
01620                                    (s->codec_id == CODEC_ID_H264 ? 0 : 1);
01621         s->low_delay = 0; // needed to see the vectors without trashing the buffers
01622 
01623         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
01624                                       &h_chroma_shift, &v_chroma_shift);
01625         for (i = 0; i < 3; i++) {
01626             memcpy(s->visualization_buffer[i], pict->data[i],
01627                    (i == 0) ? pict->linesize[i] * height:
01628                               pict->linesize[i] * height >> v_chroma_shift);
01629             pict->data[i] = s->visualization_buffer[i];
01630         }
01631         pict->type   = FF_BUFFER_TYPE_COPY;
01632         ptr          = pict->data[0];
01633         block_height = 16 >> v_chroma_shift;
01634 
01635         for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
01636             int mb_x;
01637             for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
01638                 const int mb_index = mb_x + mb_y * s->mb_stride;
01639                 if ((s->avctx->debug_mv) && pict->motion_val) {
01640                     int type;
01641                     for (type = 0; type < 3; type++) {
01642                         int direction = 0;
01643                         switch (type) {
01644                         case 0:
01645                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
01646                                 (pict->pict_type!= AV_PICTURE_TYPE_P))
01647                                 continue;
01648                             direction = 0;
01649                             break;
01650                         case 1:
01651                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
01652                                 (pict->pict_type!= AV_PICTURE_TYPE_B))
01653                                 continue;
01654                             direction = 0;
01655                             break;
01656                         case 2:
01657                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
01658                                 (pict->pict_type!= AV_PICTURE_TYPE_B))
01659                                 continue;
01660                             direction = 1;
01661                             break;
01662                         }
01663                         if (!USES_LIST(pict->mb_type[mb_index], direction))
01664                             continue;
01665 
01666                         if (IS_8X8(pict->mb_type[mb_index])) {
01667                             int i;
01668                             for (i = 0; i < 4; i++) {
01669                                 int sx = mb_x * 16 + 4 + 8 * (i & 1);
01670                                 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
01671                                 int xy = (mb_x * 2 + (i & 1) +
01672                                           (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
01673                                 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
01674                                 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
01675                                 draw_arrow(ptr, sx, sy, mx, my, width,
01676                                            height, s->linesize, 100);
01677                             }
01678                         } else if (IS_16X8(pict->mb_type[mb_index])) {
01679                             int i;
01680                             for (i = 0; i < 2; i++) {
01681                                 int sx = mb_x * 16 + 8;
01682                                 int sy = mb_y * 16 + 4 + 8 * i;
01683                                 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
01684                                 int mx = (pict->motion_val[direction][xy][0] >> shift);
01685                                 int my = (pict->motion_val[direction][xy][1] >> shift);
01686 
01687                                 if (IS_INTERLACED(pict->mb_type[mb_index]))
01688                                     my *= 2;
01689 
01690                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
01691                                            height, s->linesize, 100);
01692                             }
01693                         } else if (IS_8X16(pict->mb_type[mb_index])) {
01694                             int i;
01695                             for (i = 0; i < 2; i++) {
01696                                 int sx = mb_x * 16 + 4 + 8 * i;
01697                                 int sy = mb_y * 16 + 8;
01698                                 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
01699                                 int mx = pict->motion_val[direction][xy][0] >> shift;
01700                                 int my = pict->motion_val[direction][xy][1] >> shift;
01701 
01702                                 if (IS_INTERLACED(pict->mb_type[mb_index]))
01703                                     my *= 2;
01704 
01705                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
01706                                            height, s->linesize, 100);
01707                             }
01708                         } else {
01709                               int sx = mb_x * 16 + 8;
01710                               int sy = mb_y * 16 + 8;
01711                               int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
01712                               int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
01713                               int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
01714                               draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01715                         }
01716                     }
01717                 }
01718                 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
01719                     uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
01720                                  0x0101010101010101ULL;
01721                     int y;
01722                     for (y = 0; y < block_height; y++) {
01723                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
01724                                       (block_height * mb_y + y) *
01725                                       pict->linesize[1]) = c;
01726                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
01727                                       (block_height * mb_y + y) *
01728                                       pict->linesize[2]) = c;
01729                     }
01730                 }
01731                 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
01732                     pict->motion_val) {
01733                     int mb_type = pict->mb_type[mb_index];
01734                     uint64_t u,v;
01735                     int y;
01736 #define COLOR(theta, r) \
01737     u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
01738     v = (int)(128 + r * sin(theta * 3.141592 / 180));
01739 
01740 
01741                     u = v = 128;
01742                     if (IS_PCM(mb_type)) {
01743                         COLOR(120, 48)
01744                     } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
01745                                IS_INTRA16x16(mb_type)) {
01746                         COLOR(30, 48)
01747                     } else if (IS_INTRA4x4(mb_type)) {
01748                         COLOR(90, 48)
01749                     } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
01750                         // COLOR(120, 48)
01751                     } else if (IS_DIRECT(mb_type)) {
01752                         COLOR(150, 48)
01753                     } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
01754                         COLOR(170, 48)
01755                     } else if (IS_GMC(mb_type)) {
01756                         COLOR(190, 48)
01757                     } else if (IS_SKIP(mb_type)) {
01758                         // COLOR(180, 48)
01759                     } else if (!USES_LIST(mb_type, 1)) {
01760                         COLOR(240, 48)
01761                     } else if (!USES_LIST(mb_type, 0)) {
01762                         COLOR(0, 48)
01763                     } else {
01764                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01765                         COLOR(300,48)
01766                     }
01767 
01768                     u *= 0x0101010101010101ULL;
01769                     v *= 0x0101010101010101ULL;
01770                     for (y = 0; y < block_height; y++) {
01771                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
01772                                       (block_height * mb_y + y) * pict->linesize[1]) = u;
01773                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
01774                                       (block_height * mb_y + y) * pict->linesize[2]) = v;
01775                     }
01776 
01777                     // segmentation
01778                     if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
01779                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
01780                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
01781                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
01782                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
01783                     }
01784                     if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
01785                         for (y = 0; y < 16; y++)
01786                             pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
01787                                           pict->linesize[0]] ^= 0x80;
01788                     }
01789                     if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
01790                         int dm = 1 << (mv_sample_log2 - 2);
01791                         for (i = 0; i < 4; i++) {
01792                             int sx = mb_x * 16 + 8 * (i & 1);
01793                             int sy = mb_y * 16 + 8 * (i >> 1);
01794                             int xy = (mb_x * 2 + (i & 1) +
01795                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
01796                             // FIXME bidir
01797                             int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
01798                             if (mv[0] != mv[dm] ||
01799                                 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
01800                                 for (y = 0; y < 8; y++)
01801                                     pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
01802                             if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
01803                                 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
01804                                               pict->linesize[0]) ^= 0x8080808080808080ULL;
01805                         }
01806                     }
01807 
01808                     if (IS_INTERLACED(mb_type) &&
01809                         s->codec_id == CODEC_ID_H264) {
01810                         // hmm
01811                     }
01812                 }
01813                 s->mbskip_table[mb_index] = 0;
01814             }
01815         }
01816     }
01817 }
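/* The visualization overlays above operate on a private copy of the frame:
 * the planes are memcpy'd into s->visualization_buffer[] and pict->data[] is
 * redirected there, so arrows and macroblock colors never modify the decoded
 * reference planes. The QP and MB-type overlays paint each macroblock's
 * chroma area with a constant value using one 64-bit store per row. */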
01818 
01819 static inline int hpel_motion_lowres(MpegEncContext *s,
01820                                      uint8_t *dest, uint8_t *src,
01821                                      int field_based, int field_select,
01822                                      int src_x, int src_y,
01823                                      int width, int height, int stride,
01824                                      int h_edge_pos, int v_edge_pos,
01825                                      int w, int h, h264_chroma_mc_func *pix_op,
01826                                      int motion_x, int motion_y)
01827 {
01828     const int lowres   = s->avctx->lowres;
01829     const int op_index = FFMIN(lowres, 2);
01830     const int s_mask   = (2 << lowres) - 1;
01831     int emu = 0;
01832     int sx, sy;
01833 
01834     if (s->quarter_sample) {
01835         motion_x /= 2;
01836         motion_y /= 2;
01837     }
01838 
01839     sx = motion_x & s_mask;
01840     sy = motion_y & s_mask;
01841     src_x += motion_x >> lowres + 1;
01842     src_y += motion_y >> lowres + 1;
01843 
01844     src   += src_y * stride + src_x;
01845 
01846     if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
01847         (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
01848         s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
01849                                 (h + 1) << field_based, src_x,
01850                                 src_y   << field_based,
01851                                 h_edge_pos,
01852                                 v_edge_pos);
01853         src = s->edge_emu_buffer;
01854         emu = 1;
01855     }
01856 
01857     sx = (sx << 2) >> lowres;
01858     sy = (sy << 2) >> lowres;
01859     if (field_select)
01860         src += s->linesize;
01861     pix_op[op_index](dest, src, stride, h, sx, sy);
01862     return emu;
01863 }
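/* Note on the lowres sub-pel handling above: sx/sy keep a (lowres + 1)-bit
 * fractional position (s_mask = (2 << lowres) - 1), and (sx << 2) >> lowres
 * rescales that to the 3-bit (eighth-pel) x/y fraction which the
 * h264_chroma_mc_func put/avg routines use for their bilinear weights. */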
01864 
01865 /* apply one mpeg motion vector to the three components */
01866 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
01867                                                 uint8_t *dest_y,
01868                                                 uint8_t *dest_cb,
01869                                                 uint8_t *dest_cr,
01870                                                 int field_based,
01871                                                 int bottom_field,
01872                                                 int field_select,
01873                                                 uint8_t **ref_picture,
01874                                                 h264_chroma_mc_func *pix_op,
01875                                                 int motion_x, int motion_y,
01876                                                 int h, int mb_y)
01877 {
01878     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
01879     int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
01880         uvsx, uvsy;
01881     const int lowres     = s->avctx->lowres;
01882     const int op_index   = FFMIN(lowres, 2);
01883     const int block_s    = 8>>lowres;
01884     const int s_mask     = (2 << lowres) - 1;
01885     const int h_edge_pos = s->h_edge_pos >> lowres;
01886     const int v_edge_pos = s->v_edge_pos >> lowres;
01887     linesize   = s->current_picture.f.linesize[0] << field_based;
01888     uvlinesize = s->current_picture.f.linesize[1] << field_based;
01889 
01890     // FIXME obviously not perfect but qpel will not work in lowres anyway
01891     if (s->quarter_sample) {
01892         motion_x /= 2;
01893         motion_y /= 2;
01894     }
01895 
01896     if (field_based) {
01897         motion_y += (bottom_field - field_select) * ((1 << lowres) - 1);
01898     }
01899 
01900     sx = motion_x & s_mask;
01901     sy = motion_y & s_mask;
01902     src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
01903     src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
01904 
01905     if (s->out_format == FMT_H263) {
01906         uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
01907         uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
01908         uvsrc_x = src_x >> 1;
01909         uvsrc_y = src_y >> 1;
01910     } else if (s->out_format == FMT_H261) {
01911         // in H.261 even the chroma MVs are full-pel
01912         mx      = motion_x / 4;
01913         my      = motion_y / 4;
01914         uvsx    = (2 * mx) & s_mask;
01915         uvsy    = (2 * my) & s_mask;
01916         uvsrc_x = s->mb_x * block_s + (mx >> lowres);
01917         uvsrc_y =    mb_y * block_s + (my >> lowres);
01918     } else {
01919         mx      = motion_x / 2;
01920         my      = motion_y / 2;
01921         uvsx    = mx & s_mask;
01922         uvsy    = my & s_mask;
01923         uvsrc_x = s->mb_x * block_s                 + (mx >> lowres + 1);
01924         uvsrc_y =   (mb_y * block_s >> field_based) + (my >> lowres + 1);
01925     }
01926 
01927     ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
01928     ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
01929     ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
01930 
01931     if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) ||
01932         (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
01933         s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
01934                                 s->linesize, 17, 17 + field_based,
01935                                 src_x, src_y << field_based, h_edge_pos,
01936                                 v_edge_pos);
01937         ptr_y = s->edge_emu_buffer;
01938         if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
01939             uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
01940             s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
01941                                     9 + field_based,
01942                                     uvsrc_x, uvsrc_y << field_based,
01943                                     h_edge_pos >> 1, v_edge_pos >> 1);
01944             s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
01945                                     9 + field_based,
01946                                     uvsrc_x, uvsrc_y << field_based,
01947                                     h_edge_pos >> 1, v_edge_pos >> 1);
01948             ptr_cb = uvbuf;
01949             ptr_cr = uvbuf + 16;
01950         }
01951     }
01952 
01953     // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
01954     if (bottom_field) {
01955         dest_y  += s->linesize;
01956         dest_cb += s->uvlinesize;
01957         dest_cr += s->uvlinesize;
01958     }
01959 
01960     if (field_select) {
01961         ptr_y   += s->linesize;
01962         ptr_cb  += s->uvlinesize;
01963         ptr_cr  += s->uvlinesize;
01964     }
01965 
01966     sx = (sx << 2) >> lowres;
01967     sy = (sy << 2) >> lowres;
01968     pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
01969 
01970     if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
01971         uvsx = (uvsx << 2) >> lowres;
01972         uvsy = (uvsy << 2) >> lowres;
01973         pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
01974                          uvsx, uvsy);
01975         pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
01976                          uvsx, uvsy);
01977     }
01978     // FIXME h261 lowres loop filter
01979 }
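/* When the reference block would read beyond h_edge_pos/v_edge_pos, the code
 * above first copies it into edge_emu_buffer with replicated borders: a
 * 17 x (17 + field_based) luma patch at the start of the buffer and two
 * 9 x (9 + field_based) chroma patches at edge_emu_buffer + 18 * linesize
 * (Cr a further 16 bytes in), then points ptr_y/ptr_cb/ptr_cr at the copies. */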
01980 
01981 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
01982                                             uint8_t *dest_cb, uint8_t *dest_cr,
01983                                             uint8_t **ref_picture,
01984                                             h264_chroma_mc_func * pix_op,
01985                                             int mx, int my)
01986 {
01987     const int lowres     = s->avctx->lowres;
01988     const int op_index   = FFMIN(lowres, 2);
01989     const int block_s    = 8 >> lowres;
01990     const int s_mask     = (2 << lowres) - 1;
01991     const int h_edge_pos = s->h_edge_pos >> lowres + 1;
01992     const int v_edge_pos = s->v_edge_pos >> lowres + 1;
01993     int emu = 0, src_x, src_y, offset, sx, sy;
01994     uint8_t *ptr;
01995 
01996     if (s->quarter_sample) {
01997         mx /= 2;
01998         my /= 2;
01999     }
02000 
02001     /* In case of 8X8, we construct a single chroma motion vector
02002        with a special rounding */
02003     mx = ff_h263_round_chroma(mx);
02004     my = ff_h263_round_chroma(my);
02005 
02006     sx = mx & s_mask;
02007     sy = my & s_mask;
02008     src_x = s->mb_x * block_s + (mx >> lowres + 1);
02009     src_y = s->mb_y * block_s + (my >> lowres + 1);
02010 
02011     offset = src_y * s->uvlinesize + src_x;
02012     ptr = ref_picture[1] + offset;
02013     if (s->flags & CODEC_FLAG_EMU_EDGE) {
02014         if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
02015             (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
02016             s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
02017                                     9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
02018             ptr = s->edge_emu_buffer;
02019             emu = 1;
02020         }
02021     }
02022     sx = (sx << 2) >> lowres;
02023     sy = (sy << 2) >> lowres;
02024     pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
02025 
02026     ptr = ref_picture[2] + offset;
02027     if (emu) {
02028         s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
02029                                 src_x, src_y, h_edge_pos, v_edge_pos);
02030         ptr = s->edge_emu_buffer;
02031     }
02032     pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
02033 }
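/* For 4MV macroblocks the caller sums the four luma vectors and passes the
 * sum as mx/my; ff_h263_round_chroma() then maps it to the single chroma
 * vector with the H.263-style rounding, and the same src_x/src_y (and edge
 * emulation, if needed) are reused for both Cb and Cr. */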
02034 
02046 static inline void MPV_motion_lowres(MpegEncContext *s,
02047                                      uint8_t *dest_y, uint8_t *dest_cb,
02048                                      uint8_t *dest_cr,
02049                                      int dir, uint8_t **ref_picture,
02050                                      h264_chroma_mc_func *pix_op)
02051 {
02052     int mx, my;
02053     int mb_x, mb_y, i;
02054     const int lowres  = s->avctx->lowres;
02055     const int block_s = 8 >>lowres;
02056 
02057     mb_x = s->mb_x;
02058     mb_y = s->mb_y;
02059 
02060     switch (s->mv_type) {
02061     case MV_TYPE_16X16:
02062         mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02063                            0, 0, 0,
02064                            ref_picture, pix_op,
02065                            s->mv[dir][0][0], s->mv[dir][0][1],
02066                            2 * block_s, mb_y);
02067         break;
02068     case MV_TYPE_8X8:
02069         mx = 0;
02070         my = 0;
02071         for (i = 0; i < 4; i++) {
02072             hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
02073                                s->linesize) * block_s,
02074                                ref_picture[0], 0, 0,
02075                                (2 * mb_x + (i & 1)) * block_s,
02076                                (2 * mb_y + (i >> 1)) * block_s,
02077                                s->width, s->height, s->linesize,
02078                                s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
02079                                block_s, block_s, pix_op,
02080                                s->mv[dir][i][0], s->mv[dir][i][1]);
02081 
02082             mx += s->mv[dir][i][0];
02083             my += s->mv[dir][i][1];
02084         }
02085 
02086         if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
02087             chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
02088                                      pix_op, mx, my);
02089         break;
02090     case MV_TYPE_FIELD:
02091         if (s->picture_structure == PICT_FRAME) {
02092             /* top field */
02093             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02094                                1, 0, s->field_select[dir][0],
02095                                ref_picture, pix_op,
02096                                s->mv[dir][0][0], s->mv[dir][0][1],
02097                                block_s, mb_y);
02098             /* bottom field */
02099             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02100                                1, 1, s->field_select[dir][1],
02101                                ref_picture, pix_op,
02102                                s->mv[dir][1][0], s->mv[dir][1][1],
02103                                block_s, mb_y);
02104         } else {
02105             if (s->picture_structure != s->field_select[dir][0] + 1 &&
02106                 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
02107                 ref_picture = s->current_picture_ptr->f.data;
02108 
02109             }
02110             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02111                                0, 0, s->field_select[dir][0],
02112                                ref_picture, pix_op,
02113                                s->mv[dir][0][0],
02114                                s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
02115         }
02116         break;
02117     case MV_TYPE_16X8:
02118         for (i = 0; i < 2; i++) {
02119             uint8_t **ref2picture;
02120 
02121             if (s->picture_structure == s->field_select[dir][i] + 1 ||
02122                 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
02123                 ref2picture = ref_picture;
02124             } else {
02125                 ref2picture = s->current_picture_ptr->f.data;
02126             }
02127 
02128             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02129                                0, 0, s->field_select[dir][i],
02130                                ref2picture, pix_op,
02131                                s->mv[dir][i][0], s->mv[dir][i][1] +
02132                                2 * block_s * i, block_s, mb_y >> 1);
02133 
02134             dest_y  +=  2 * block_s *  s->linesize;
02135             dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
02136             dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
02137         }
02138         break;
02139     case MV_TYPE_DMV:
02140         if (s->picture_structure == PICT_FRAME) {
02141             for (i = 0; i < 2; i++) {
02142                 int j;
02143                 for (j = 0; j < 2; j++) {
02144                     mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02145                                        1, j, j ^ i,
02146                                        ref_picture, pix_op,
02147                                        s->mv[dir][2 * i + j][0],
02148                                        s->mv[dir][2 * i + j][1],
02149                                        block_s, mb_y);
02150                 }
02151                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
02152             }
02153         } else {
02154             for (i = 0; i < 2; i++) {
02155                 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02156                                    0, 0, s->picture_structure != i + 1,
02157                                    ref_picture, pix_op,
02158                                    s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
02159                                    2 * block_s, mb_y >> 1);
02160 
02161                 // after the first (put) pass, further predictions are averaged into the same block
02162                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
02163 
02164                 // opposite parity is always in the same
02165                 // frame if this is second field
02166                 if (!s->first_field) {
02167                     ref_picture = s->current_picture_ptr->f.data;
02168                 }
02169             }
02170         }
02171         break;
02172     default:
02173         assert(0);
02174     }
02175 }
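/* In the MV_TYPE_DMV (dual prime) cases above the second prediction of each
 * field is blended with the first by switching pix_op from the put to the
 * avg h264 chroma table after the first pass, so later calls average into
 * the pixels already written. */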
02176 
02180 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
02181 {
02182     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
02183     int my, off, i, mvs;
02184 
02185     if (s->picture_structure != PICT_FRAME) goto unhandled;
02186 
02187     switch (s->mv_type) {
02188         case MV_TYPE_16X16:
02189             mvs = 1;
02190             break;
02191         case MV_TYPE_16X8:
02192             mvs = 2;
02193             break;
02194         case MV_TYPE_8X8:
02195             mvs = 4;
02196             break;
02197         default:
02198             goto unhandled;
02199     }
02200 
02201     for (i = 0; i < mvs; i++) {
02202         my = s->mv[dir][i][1]<<qpel_shift;
02203         my_max = FFMAX(my_max, my);
02204         my_min = FFMIN(my_min, my);
02205     }
02206 
02207     off = (FFMAX(-my_min, my_max) + 63) >> 6;
02208 
02209     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
02210 unhandled:
02211     return s->mb_height-1;
02212 }
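/* The vectors are brought to quarter-pel precision (<< qpel_shift), so 64
 * units correspond to one 16-pixel macroblock row and off = (|my| + 63) >> 6
 * is a conservative count of the extra MB rows the prediction may reach;
 * e.g. a half-pel my of 40 becomes 80 quarter-pel and gives off = 2. */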
02213 
02214 /* put block[] to dest[] */
02215 static inline void put_dct(MpegEncContext *s,
02216                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02217 {
02218     s->dct_unquantize_intra(s, block, i, qscale);
02219     s->dsp.idct_put (dest, line_size, block);
02220 }
02221 
02222 /* add block[] to dest[] */
02223 static inline void add_dct(MpegEncContext *s,
02224                            DCTELEM *block, int i, uint8_t *dest, int line_size)
02225 {
02226     if (s->block_last_index[i] >= 0) {
02227         s->dsp.idct_add (dest, line_size, block);
02228     }
02229 }
02230 
02231 static inline void add_dequant_dct(MpegEncContext *s,
02232                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02233 {
02234     if (s->block_last_index[i] >= 0) {
02235         s->dct_unquantize_inter(s, block, i, qscale);
02236 
02237         s->dsp.idct_add (dest, line_size, block);
02238     }
02239 }
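/* put_dct() handles intra blocks that still need dequantization here,
 * add_dct() adds residuals that were already dequantized while parsing the
 * bitstream (MPEG-1/2, MSMPEG4, MPEG-4 without mpeg_quant), and
 * add_dequant_dct() dequantizes inter residuals before adding them; in the
 * add variants a block_last_index below 0 marks a block with no coded
 * coefficients, which is skipped. */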
02240 
02244 void ff_clean_intra_table_entries(MpegEncContext *s)
02245 {
02246     int wrap = s->b8_stride;
02247     int xy = s->block_index[0];
02248 
02249     s->dc_val[0][xy           ] =
02250     s->dc_val[0][xy + 1       ] =
02251     s->dc_val[0][xy     + wrap] =
02252     s->dc_val[0][xy + 1 + wrap] = 1024;
02253     /* ac pred */
02254     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
02255     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
02256     if (s->msmpeg4_version>=3) {
02257         s->coded_block[xy           ] =
02258         s->coded_block[xy + 1       ] =
02259         s->coded_block[xy     + wrap] =
02260         s->coded_block[xy + 1 + wrap] = 0;
02261     }
02262     /* chroma */
02263     wrap = s->mb_stride;
02264     xy = s->mb_x + s->mb_y * wrap;
02265     s->dc_val[1][xy] =
02266     s->dc_val[2][xy] = 1024;
02267     /* ac pred */
02268     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
02269     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
02270 
02271     s->mbintra_table[xy]= 0;
02272 }
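/* 1024 is the neutral DC prediction (mid-grey 128 times the usual DC scaler
 * of 8) and each 32 * int16_t memset clears the 16 AC predictors of the two
 * luma blocks in one block row; zeroing mbintra_table[xy] records that the
 * predictors of this macroblock have already been reset, so they are not
 * cleaned again. */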
02273 
02274 /* generic function called after a macroblock has been parsed by the
02275    decoder or after it has been encoded by the encoder.
02276 
02277    Important variables used:
02278    s->mb_intra : true if intra macroblock
02279    s->mv_dir   : motion vector direction
02280    s->mv_type  : motion vector type
02281    s->mv       : motion vector
02282    s->interlaced_dct : true if interlaced dct used (mpeg2)
02283  */
02284 static av_always_inline
02285 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
02286                             int lowres_flag, int is_mpeg12)
02287 {
02288     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
02289     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
02290         ff_xvmc_decode_mb(s);//xvmc uses pblocks
02291         return;
02292     }
02293 
02294     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
02295        /* save DCT coefficients */
02296        int i,j;
02297        DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
02298        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
02299        for(i=0; i<6; i++){
02300            for(j=0; j<64; j++){
02301                *dct++ = block[i][s->dsp.idct_permutation[j]];
02302                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
02303            }
02304            av_log(s->avctx, AV_LOG_DEBUG, "\n");
02305        }
02306     }
02307 
02308     s->current_picture.f.qscale_table[mb_xy] = s->qscale;
02309 
02310     /* update DC predictors for P macroblocks */
02311     if (!s->mb_intra) {
02312         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
02313             if(s->mbintra_table[mb_xy])
02314                 ff_clean_intra_table_entries(s);
02315         } else {
02316             s->last_dc[0] =
02317             s->last_dc[1] =
02318             s->last_dc[2] = 128 << s->intra_dc_precision;
02319         }
02320     }
02321     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
02322         s->mbintra_table[mb_xy]=1;
02323 
02324     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
02325         uint8_t *dest_y, *dest_cb, *dest_cr;
02326         int dct_linesize, dct_offset;
02327         op_pixels_func (*op_pix)[4];
02328         qpel_mc_func (*op_qpix)[16];
02329         const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
02330         const int uvlinesize = s->current_picture.f.linesize[1];
02331         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
02332         const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
02333 
02334         /* avoid copy if macroblock skipped in last frame too */
02335         /* skip only during decoding as we might trash the buffers during encoding a bit */
02336         if(!s->encoding){
02337             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
02338 
02339             if (s->mb_skipped) {
02340                 s->mb_skipped= 0;
02341                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
02342                 *mbskip_ptr = 1;
02343             } else if(!s->current_picture.f.reference) {
02344                 *mbskip_ptr = 1;
02345             } else{
02346                 *mbskip_ptr = 0; /* not skipped */
02347             }
02348         }
02349 
02350         dct_linesize = linesize << s->interlaced_dct;
02351         dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
02352 
02353         if(readable){
02354             dest_y=  s->dest[0];
02355             dest_cb= s->dest[1];
02356             dest_cr= s->dest[2];
02357         }else{
02358             dest_y = s->b_scratchpad;
02359             dest_cb= s->b_scratchpad+16*linesize;
02360             dest_cr= s->b_scratchpad+32*linesize;
02361         }
02362 
02363         if (!s->mb_intra) {
02364             /* motion handling */
02365             /* decoding or more than one mb_type (MC was already done otherwise) */
02366             if(!s->encoding){
02367 
02368                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
02369                     if (s->mv_dir & MV_DIR_FORWARD) {
02370                         ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
02371                     }
02372                     if (s->mv_dir & MV_DIR_BACKWARD) {
02373                         ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
02374                     }
02375                 }
02376 
02377                 if(lowres_flag){
02378                     h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
02379 
02380                     if (s->mv_dir & MV_DIR_FORWARD) {
02381                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
02382                         op_pix = s->dsp.avg_h264_chroma_pixels_tab;
02383                     }
02384                     if (s->mv_dir & MV_DIR_BACKWARD) {
02385                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
02386                     }
02387                 }else{
02388                     op_qpix= s->me.qpel_put;
02389                     if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
02390                         op_pix = s->dsp.put_pixels_tab;
02391                     }else{
02392                         op_pix = s->dsp.put_no_rnd_pixels_tab;
02393                     }
02394                     if (s->mv_dir & MV_DIR_FORWARD) {
02395                         MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
02396                         op_pix = s->dsp.avg_pixels_tab;
02397                         op_qpix= s->me.qpel_avg;
02398                     }
02399                     if (s->mv_dir & MV_DIR_BACKWARD) {
02400                         MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
02401                     }
02402                 }
02403             }
02404 
02405             /* skip dequant / idct if we are really late ;) */
02406             if(s->avctx->skip_idct){
02407                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
02408                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
02409                    || s->avctx->skip_idct >= AVDISCARD_ALL)
02410                     goto skip_idct;
02411             }
02412 
02413             /* add dct residue */
02414             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
02415                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
02416                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02417                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02418                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02419                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02420 
02421                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02422                     if (s->chroma_y_shift){
02423                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02424                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02425                     }else{
02426                         dct_linesize >>= 1;
02427                         dct_offset >>=1;
02428                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02429                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02430                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02431                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02432                     }
02433                 }
02434             } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
02435                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
02436                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
02437                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
02438                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
02439 
02440                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02441                     if(s->chroma_y_shift){//Chroma420
02442                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
02443                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
02444                     }else{
02445                         //chroma422
02446                         dct_linesize = uvlinesize << s->interlaced_dct;
02447                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
02448 
02449                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
02450                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
02451                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
02452                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
02453                         if(!s->chroma_x_shift){//Chroma444
02454                             add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
02455                             add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
02456                             add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
02457                             add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
02458                         }
02459                     }
02460                 }//fi gray
02461             }
02462             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
02463                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
02464             }
02465         } else {
02466             /* dct only in intra block */
02467             if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
02468                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02469                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02470                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02471                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02472 
02473                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02474                     if(s->chroma_y_shift){
02475                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02476                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02477                     }else{
02478                         dct_offset >>=1;
02479                         dct_linesize >>=1;
02480                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02481                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02482                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02483                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02484                     }
02485                 }
02486             }else{
02487                 s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
02488                 s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
02489                 s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
02490                 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
02491 
02492                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02493                     if(s->chroma_y_shift){
02494                         s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
02495                         s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
02496                     }else{
02497 
02498                         dct_linesize = uvlinesize << s->interlaced_dct;
02499                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
02500 
02501                         s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
02502                         s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
02503                         s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
02504                         s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
02505                         if(!s->chroma_x_shift){//Chroma444
02506                             s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
02507                             s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
02508                             s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
02509                             s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
02510                         }
02511                     }
02512                 }//gray
02513             }
02514         }
02515 skip_idct:
02516         if(!readable){
02517             s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
02518             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
02519             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
02520         }
02521     }
02522 }
02523 
02524 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02525 #if !CONFIG_SMALL
02526     if(s->out_format == FMT_MPEG1) {
02527         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02528         else                 MPV_decode_mb_internal(s, block, 0, 1);
02529     } else
02530 #endif
02531     if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02532     else                  MPV_decode_mb_internal(s, block, 0, 0);
02533 }
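/* The !CONFIG_SMALL branch lets the always-inlined MPV_decode_mb_internal()
 * be instantiated with is_mpeg12 and lowres_flag as compile-time constants,
 * removing those per-macroblock branches from the MPEG-1/2 fast path; with
 * CONFIG_SMALL only the generic (is_mpeg12 = 0) variants are built to keep
 * the code size down. */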
02534 
02538 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
02539     const int field_pic= s->picture_structure != PICT_FRAME;
02540     if(field_pic){
02541         h <<= 1;
02542         y <<= 1;
02543     }
02544 
02545     if (!s->avctx->hwaccel
02546        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
02547        && s->unrestricted_mv
02548        && s->current_picture.f.reference
02549        && !s->intra_only
02550        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
02551         int sides = 0, edge_h;
02552         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
02553         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
02554         if (y==0) sides |= EDGE_TOP;
02555         if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
02556 
02557         edge_h= FFMIN(h, s->v_edge_pos - y);
02558 
02559         s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
02560                           s->linesize,           s->h_edge_pos,         edge_h,
02561                           EDGE_WIDTH,            EDGE_WIDTH,            sides);
02562         s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
02563                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
02564                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
02565         s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
02566                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
02567                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
02568     }
02569 
02570     h= FFMIN(h, s->avctx->height - y);
02571 
02572     if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
02573 
02574     if (s->avctx->draw_horiz_band) {
02575         AVFrame *src;
02576         int offset[AV_NUM_DATA_POINTERS];
02577         int i;
02578 
02579         if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
02580             src= (AVFrame*)s->current_picture_ptr;
02581         else if(s->last_picture_ptr)
02582             src= (AVFrame*)s->last_picture_ptr;
02583         else
02584             return;
02585 
02586         if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
02587             for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
02588                 offset[i] = 0;
02589         }else{
02590             offset[0]= y * s->linesize;
02591             offset[1]=
02592             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
02593             for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
02594                 offset[i] = 0;
02595         }
02596 
02597         emms_c();
02598 
02599         s->avctx->draw_horiz_band(s->avctx, src, offset,
02600                                   y, s->picture_structure, h);
02601     }
02602 }
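/* [Editorial note, not part of mpegvideo.c] ff_draw_horiz_band() first
 * doubles y and h for field pictures (one field row covers two frame rows),
 * then, for reference pictures decoded in software without edge emulation,
 * pads the frame borders with draw_edges() so that unrestricted motion
 * vectors may point outside the picture, and finally hands the band to the
 * user's draw_horiz_band callback.  The offset[] passed to the callback,
 * written out for an assumed 4:2:0 stream (chroma_y_shift == 1) and y == 32:
 *
 *     offset[0] = 32 * linesize;            // luma: 32 rows into the plane
 *     offset[1] = offset[2]
 *               = (32 >> 1) * uvlinesize;   // chroma: 16 rows into the plane
 *
 * For frame-structured B-pictures (other than H.264) all offsets are left
 * at 0. */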
02603 
02604 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
02605     const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
02606     const int uvlinesize = s->current_picture.f.linesize[1];
02607     const int mb_size= 4 - s->avctx->lowres;
02608 
02609     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
02610     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
02611     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
02612     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
02613     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02614     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02615     //block_index is not used by mpeg2, so it is not affected by chroma_format
02616 
02617     s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
02618     s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02619     s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02620 
02621     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
02622     {
02623         if(s->picture_structure==PICT_FRAME){
02624             s->dest[0] += s->mb_y *   linesize << mb_size;
02625             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02626             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02627         }else{
02628             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
02629             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02630             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02631             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
02632         }
02633     }
02634 }
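/* [Editorial note, not part of mpegvideo.c] mb_size is the log2 of the
 * macroblock width that is actually stored: 4 (16 pixels) at full
 * resolution, one less per lowres step.  A worked example of the dest[]
 * arithmetic, assuming full resolution, 4:2:0 (both chroma shifts == 1),
 * mb_x == 3, mb_y == 2 and a frame picture:
 *
 *     dest[0] = data[0] + (3 - 1) * 16 + 2 * linesize   * 16;
 *     dest[1] = data[1] + (3 - 1) * 8  + 2 * uvlinesize * 8;
 *
 * i.e. the pointers start one macroblock to the left of column mb_x and are
 * advanced macroblock by macroblock as decoding moves along the row; field
 * pictures use mb_y >> 1 for the row term instead. */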
02635 
02636 void ff_mpeg_flush(AVCodecContext *avctx){
02637     int i;
02638     MpegEncContext *s = avctx->priv_data;
02639 
02640     if(s==NULL || s->picture==NULL)
02641         return;
02642 
02643     for(i=0; i<s->picture_count; i++){
02644        if (s->picture[i].f.data[0] &&
02645            (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
02646             s->picture[i].f.type == FF_BUFFER_TYPE_USER))
02647         free_frame_buffer(s, &s->picture[i]);
02648     }
02649     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02650 
02651     s->mb_x= s->mb_y= 0;
02652 
02653     s->parse_context.state= -1;
02654     s->parse_context.frame_start_found= 0;
02655     s->parse_context.overread= 0;
02656     s->parse_context.overread_index= 0;
02657     s->parse_context.index= 0;
02658     s->parse_context.last_index= 0;
02659     s->bitstream_buffer_size=0;
02660     s->pp_time=0;
02661 }
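/* [Editorial note, not part of mpegvideo.c] ff_mpeg_flush() is the codec
 * flush hook: it releases every internally or user-allocated picture buffer,
 * clears the current/last/next picture pointers, and resets the macroblock
 * position, parse context and bitstream buffer so decoding can restart
 * cleanly after a seek. */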
02662 
02663 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02664                                    DCTELEM *block, int n, int qscale)
02665 {
02666     int i, level, nCoeffs;
02667     const uint16_t *quant_matrix;
02668 
02669     nCoeffs= s->block_last_index[n];
02670 
02671     if (n < 4)
02672         block[0] = block[0] * s->y_dc_scale;
02673     else
02674         block[0] = block[0] * s->c_dc_scale;
02675     /* XXX: only mpeg1 */
02676     quant_matrix = s->intra_matrix;
02677     for(i=1;i<=nCoeffs;i++) {
02678         int j= s->intra_scantable.permutated[i];
02679         level = block[j];
02680         if (level) {
02681             if (level < 0) {
02682                 level = -level;
02683                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02684                 level = (level - 1) | 1;
02685                 level = -level;
02686             } else {
02687                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02688                 level = (level - 1) | 1;
02689             }
02690             block[j] = level;
02691         }
02692     }
02693 }
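/* [Editorial sketch, not part of mpegvideo.c] The loop above implements
 * MPEG-1 intra reconstruction: each AC level is scaled by the quantiser and
 * the intra matrix, divided by 8, and then forced to be odd ((x - 1) | 1),
 * while the DC coefficient is simply multiplied by the DC scaler.  The same
 * per-coefficient step, written out as an illustrative helper (the name and
 * the use of abs() are this note's, not the file's):
 *
 *     static inline int mpeg1_intra_dequant_one(int level, int qscale, int qm)
 *     {
 *         int v = (abs(level) * qscale * qm) >> 3;   // scale, divide by 8
 *         v = (v - 1) | 1;                           // force the result odd
 *         return level < 0 ? -v : v;                 // restore the sign
 *     }
 *
 * Example: level 3, qscale 8, matrix entry 16 -> (3*8*16) >> 3 = 48 -> 47. */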
02694 
02695 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02696                                    DCTELEM *block, int n, int qscale)
02697 {
02698     int i, level, nCoeffs;
02699     const uint16_t *quant_matrix;
02700 
02701     nCoeffs= s->block_last_index[n];
02702 
02703     quant_matrix = s->inter_matrix;
02704     for(i=0; i<=nCoeffs; i++) {
02705         int j= s->intra_scantable.permutated[i];
02706         level = block[j];
02707         if (level) {
02708             if (level < 0) {
02709                 level = -level;
02710                 level = (((level << 1) + 1) * qscale *
02711                          ((int) (quant_matrix[j]))) >> 4;
02712                 level = (level - 1) | 1;
02713                 level = -level;
02714             } else {
02715                 level = (((level << 1) + 1) * qscale *
02716                          ((int) (quant_matrix[j]))) >> 4;
02717                 level = (level - 1) | 1;
02718             }
02719             block[j] = level;
02720         }
02721     }
02722 }
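/* [Editorial sketch, not part of mpegvideo.c] MPEG-1 inter reconstruction
 * differs from the intra case above in two ways: the level is first mapped
 * to 2*|level| + 1 and the shift is 4 (divide by 16) instead of 3, and the
 * loop starts at i = 0 because inter blocks have no separately coded DC
 * term.  Per coefficient, illustratively:
 *
 *     v = (((abs(level) << 1) + 1) * qscale * qm) >> 4;
 *     v = (v - 1) | 1;        // again forced odd
 *
 * Example: level 2, qscale 4, matrix entry 16 -> (5*4*16) >> 4 = 20 -> 19. */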
02723 
02724 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02725                                    DCTELEM *block, int n, int qscale)
02726 {
02727     int i, level, nCoeffs;
02728     const uint16_t *quant_matrix;
02729 
02730     if(s->alternate_scan) nCoeffs= 63;
02731     else nCoeffs= s->block_last_index[n];
02732 
02733     if (n < 4)
02734         block[0] = block[0] * s->y_dc_scale;
02735     else
02736         block[0] = block[0] * s->c_dc_scale;
02737     quant_matrix = s->intra_matrix;
02738     for(i=1;i<=nCoeffs;i++) {
02739         int j= s->intra_scantable.permutated[i];
02740         level = block[j];
02741         if (level) {
02742             if (level < 0) {
02743                 level = -level;
02744                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02745                 level = -level;
02746             } else {
02747                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02748             }
02749             block[j] = level;
02750         }
02751     }
02752 }
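/* [Editorial note, not part of mpegvideo.c] The MPEG-2 intra variant drops
 * the MPEG-1 oddification step: the reconstruction is just
 * (|level| * qscale * matrix) >> 3 with the sign restored, and with
 * alternate_scan all 63 AC positions are processed regardless of
 * block_last_index.  Mismatch control is handled separately (see the
 * bitexact variant below). */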
02753 
02754 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02755                                    DCTELEM *block, int n, int qscale)
02756 {
02757     int i, level, nCoeffs;
02758     const uint16_t *quant_matrix;
02759     int sum=-1;
02760 
02761     if(s->alternate_scan) nCoeffs= 63;
02762     else nCoeffs= s->block_last_index[n];
02763 
02764     if (n < 4)
02765         block[0] = block[0] * s->y_dc_scale;
02766     else
02767         block[0] = block[0] * s->c_dc_scale;
02768     quant_matrix = s->intra_matrix;
02769     for(i=1;i<=nCoeffs;i++) {
02770         int j= s->intra_scantable.permutated[i];
02771         level = block[j];
02772         if (level) {
02773             if (level < 0) {
02774                 level = -level;
02775                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02776                 level = -level;
02777             } else {
02778                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02779             }
02780             block[j] = level;
02781             sum+=level;
02782         }
02783     }
02784     block[63]^=sum&1;
02785 }
02786 
02787 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02788                                    DCTELEM *block, int n, int qscale)
02789 {
02790     int i, level, nCoeffs;
02791     const uint16_t *quant_matrix;
02792     int sum=-1;
02793 
02794     if(s->alternate_scan) nCoeffs= 63;
02795     else nCoeffs= s->block_last_index[n];
02796 
02797     quant_matrix = s->inter_matrix;
02798     for(i=0; i<=nCoeffs; i++) {
02799         int j= s->intra_scantable.permutated[i];
02800         level = block[j];
02801         if (level) {
02802             if (level < 0) {
02803                 level = -level;
02804                 level = (((level << 1) + 1) * qscale *
02805                          ((int) (quant_matrix[j]))) >> 4;
02806                 level = -level;
02807             } else {
02808                 level = (((level << 1) + 1) * qscale *
02809                          ((int) (quant_matrix[j]))) >> 4;
02810             }
02811             block[j] = level;
02812             sum+=level;
02813         }
02814     }
02815     block[63]^=sum&1;
02816 }
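/* [Editorial note, not part of mpegvideo.c] Both the bitexact intra variant
 * and the inter variant above end with MPEG-2 mismatch control: sum starts
 * at -1 and accumulates the levels written back in the loop, so (sum & 1)
 * is 1 exactly when those levels add up to an even number, and in that case
 * block[63] ^= 1 flips the last coefficient by +/-1.  This keeps the
 * coefficient sum fed to the IDCT odd, which is what MPEG-2 prescribes to
 * avoid encoder/decoder drift.  Tiny example: if the reconstructed levels
 * add up to 10, then sum = -1 + 10 = 9, sum & 1 = 1, so block[63] is
 * adjusted. */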
02817 
02818 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02819                                   DCTELEM *block, int n, int qscale)
02820 {
02821     int i, level, qmul, qadd;
02822     int nCoeffs;
02823 
02824     assert(s->block_last_index[n]>=0);
02825 
02826     qmul = qscale << 1;
02827 
02828     if (!s->h263_aic) {
02829         if (n < 4)
02830             block[0] = block[0] * s->y_dc_scale;
02831         else
02832             block[0] = block[0] * s->c_dc_scale;
02833         qadd = (qscale - 1) | 1;
02834     }else{
02835         qadd = 0;
02836     }
02837     if(s->ac_pred)
02838         nCoeffs=63;
02839     else
02840         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02841 
02842     for(i=1; i<=nCoeffs; i++) {
02843         level = block[i];
02844         if (level) {
02845             if (level < 0) {
02846                 level = level * qmul - qadd;
02847             } else {
02848                 level = level * qmul + qadd;
02849             }
02850             block[i] = level;
02851         }
02852     }
02853 }
02854 
02855 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02856                                   DCTELEM *block, int n, int qscale)
02857 {
02858     int i, level, qmul, qadd;
02859     int nCoeffs;
02860 
02861     assert(s->block_last_index[n]>=0);
02862 
02863     qadd = (qscale - 1) | 1;
02864     qmul = qscale << 1;
02865 
02866     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02867 
02868     for(i=0; i<=nCoeffs; i++) {
02869         level = block[i];
02870         if (level) {
02871             if (level < 0) {
02872                 level = level * qmul - qadd;
02873             } else {
02874                 level = level * qmul + qadd;
02875             }
02876             block[i] = level;
02877         }
02878     }
02879 }
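/* [Editorial sketch, not part of mpegvideo.c] The two H.263-style routines
 * above share one reconstruction rule: with qmul = 2*qscale and
 * qadd = (qscale - 1) | 1 (i.e. qscale if odd, qscale - 1 if even), each
 * nonzero level becomes level*qmul + qadd, with qadd subtracted instead for
 * negative levels.  In the intra case the DC is scaled by the DC table,
 * except with advanced intra coding (h263_aic), where the DC scaling is
 * skipped and qadd is 0; with AC prediction all 63 AC positions are
 * processed.
 *
 * Example, qscale = 6: qmul = 12, qadd = 5;  level  3 ->  3*12 + 5 =  41,
 *                                            level -3 -> -3*12 - 5 = -41. */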
02880 
02884 void ff_set_qscale(MpegEncContext * s, int qscale)
02885 {
02886     if (qscale < 1)
02887         qscale = 1;
02888     else if (qscale > 31)
02889         qscale = 31;
02890 
02891     s->qscale = qscale;
02892     s->chroma_qscale= s->chroma_qscale_table[qscale];
02893 
02894     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02895     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02896 }
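/* [Editorial note, not part of mpegvideo.c] ff_set_qscale() clamps the
 * quantiser to the legal 1..31 range and refreshes everything derived from
 * it: the chroma quantiser via chroma_qscale_table and the luma/chroma DC
 * scalers via the per-codec DC scale tables.  For instance, a requested
 * qscale of 40 is clamped to 31 before the table lookups. */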
02897 
02898 void MPV_report_decode_progress(MpegEncContext *s)
02899 {
02900     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
02901         ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
02902 }