diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12.c index 3edad5b802..a48a84bd28 100644 --- a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12.c +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12.c @@ -1,115 +1,104 @@ /* - * MPEG1 codec / MPEG2 decoder + * MPEG-1/2 decoder * Copyright (c) 2000,2001 Fabrice Bellard. - * Copyright (c) 2002-2004 Michael Niedermayer + * Copyright (c) 2002-2004 Michael Niedermayer * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ - + /** * @file mpeg12.c - * MPEG1/2 codec + * MPEG-1/2 decoder */ - + //#define DEBUG #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" +#include "mpeg12.h" #include "mpeg12data.h" +#include "mpeg12decdata.h" +#include "bytestream.h" //#undef NDEBUG //#include -/* Start codes. 
*/ -#define SEQ_END_CODE 0x000001b7 -#define SEQ_START_CODE 0x000001b3 -#define GOP_START_CODE 0x000001b8 -#define PICTURE_START_CODE 0x00000100 -#define SLICE_MIN_START_CODE 0x00000101 -#define SLICE_MAX_START_CODE 0x000001af -#define EXT_START_CODE 0x000001b5 -#define USER_START_CODE 0x000001b2 - -#define DC_VLC_BITS 9 #define MV_VLC_BITS 9 #define MBINCR_VLC_BITS 9 #define MB_PAT_VLC_BITS 9 #define MB_PTYPE_VLC_BITS 6 #define MB_BTYPE_VLC_BITS 6 -#define TEX_VLC_BITS 9 -#ifdef CONFIG_ENCODERS -static void mpeg1_encode_block(MpegEncContext *s, - DCTELEM *block, - int component); -static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code); // RAL: f_code parameter added -#endif //CONFIG_ENCODERS -static inline int mpeg1_decode_block_inter(MpegEncContext *s, - DCTELEM *block, +static inline int mpeg1_decode_block_inter(MpegEncContext *s, + DCTELEM *block, int n); -static inline int mpeg1_decode_block_intra(MpegEncContext *s, - DCTELEM *block, +static inline int mpeg1_decode_block_intra(MpegEncContext *s, + DCTELEM *block, int n); -static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, - DCTELEM *block, +static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n); +static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, + DCTELEM *block, int n); -static inline int mpeg2_decode_block_intra(MpegEncContext *s, - DCTELEM *block, +static inline int mpeg2_decode_block_intra(MpegEncContext *s, + DCTELEM *block, int n); +static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, DCTELEM *block, int n); +static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n); static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred); static void exchange_uv(MpegEncContext *s); -#ifdef HAVE_XVMC extern int XVMC_field_start(MpegEncContext *s, AVCodecContext *avctx); extern int XVMC_field_end(MpegEncContext *s); extern void XVMC_pack_pblocks(MpegEncContext *s,int cbp); extern void XVMC_init_block(MpegEncContext *s);//set s->block -#endif -#ifdef CONFIG_ENCODERS -static uint8_t (*mv_penalty)[MAX_MV*2+1]= NULL; -static uint8_t fcode_tab[MAX_MV*2+1]; +static const enum PixelFormat pixfmt_yuv_420[]= {PIX_FMT_YUV420P,PIX_FMT_NONE}; +static const enum PixelFormat pixfmt_yuv_422[]= {PIX_FMT_YUV422P,PIX_FMT_NONE}; +static const enum PixelFormat pixfmt_yuv_444[]= {PIX_FMT_YUV444P,PIX_FMT_NONE}; +static const enum PixelFormat pixfmt_xvmc_mpg2_420[] = { + PIX_FMT_XVMC_MPEG2_IDCT, + PIX_FMT_XVMC_MPEG2_MC, + PIX_FMT_NONE}; -static uint32_t uni_mpeg1_ac_vlc_bits[64*64*2]; -static uint8_t uni_mpeg1_ac_vlc_len [64*64*2]; +uint8_t ff_mpeg12_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3]; -/* simple include everything table for dc, first byte is bits number next 3 are code*/ -static uint32_t mpeg1_lum_dc_uni[512]; -static uint32_t mpeg1_chr_dc_uni[512]; -static uint8_t mpeg1_index_run[2][64]; -static int8_t mpeg1_max_level[2][64]; -#endif //CONFIG_ENCODERS +#define INIT_2D_VLC_RL(rl, static_size)\ +{\ + static RL_VLC_ELEM rl_vlc_table[static_size];\ + INIT_VLC_STATIC(&rl.vlc, TEX_VLC_BITS, rl.n + 2,\ + &rl.table_vlc[0][1], 4, 2,\ + &rl.table_vlc[0][0], 4, 2, static_size);\ +\ + rl.rl_vlc[0]= rl_vlc_table;\ + init_2d_vlc_rl(&rl);\ +} static void init_2d_vlc_rl(RLTable *rl) { int i; - - init_vlc(&rl->vlc, TEX_VLC_BITS, rl->n + 2, - &rl->table_vlc[0][1], 4, 2, - &rl->table_vlc[0][0], 4, 2); - - rl->rl_vlc[0]= av_malloc(rl->vlc.table_size*sizeof(RL_VLC_ELEM)); for(i=0; ivlc.table_size; i++){ int code= 
rl->vlc.table[i][0]; int len = rl->vlc.table[i][1]; int level, run; - + if(len==0){ // illegal code run= 65; level= MAX_LEVEL; @@ -134,221 +123,12 @@ static void init_2d_vlc_rl(RLTable *rl) } } -#ifdef CONFIG_ENCODERS -static void init_uni_ac_vlc(RLTable *rl, uint32_t *uni_ac_vlc_bits, uint8_t *uni_ac_vlc_len){ - int i; - - for(i=0; i<128; i++){ - int level= i-64; - int run; - for(run=0; run<64; run++){ - int len, bits, code; - - int alevel= ABS(level); - int sign= (level>>31)&1; - - if (alevel > rl->max_level[0][run]) - code= 111; /*rl->n*/ - else - code= rl->index_run[0][run] + alevel - 1; - - if (code < 111 /* rl->n */) { - /* store the vlc & sign at once */ - len= mpeg1_vlc[code][1]+1; - bits= (mpeg1_vlc[code][0]<<1) + sign; - } else { - len= mpeg1_vlc[111/*rl->n*/][1]+6; - bits= mpeg1_vlc[111/*rl->n*/][0]<<6; - - bits|= run; - if (alevel < 128) { - bits<<=8; len+=8; - bits|= level & 0xff; - } else { - bits<<=16; len+=16; - bits|= level & 0xff; - if (level < 0) { - bits|= 0x8001 + level + 255; - } else { - bits|= level & 0xffff; - } - } - } - - uni_ac_vlc_bits[UNI_AC_ENC_INDEX(run, i)]= bits; - uni_ac_vlc_len [UNI_AC_ENC_INDEX(run, i)]= len; - } - } -} - - -static int find_frame_rate_index(MpegEncContext *s){ - int i; - int64_t dmin= INT64_MAX; - int64_t d; - - for(i=1;i<14;i++) { - if(s->avctx->strict_std_compliance >= 0 && i>=9) break; - - d = ABS(MPEG1_FRAME_RATE_BASE*(int64_t)s->avctx->frame_rate - frame_rate_tab[i]*(int64_t)s->avctx->frame_rate_base); - if(d < dmin){ - dmin=d; - s->frame_rate_index= i; - } - } - if(dmin) - return -1; - else - return 0; -} - -static int encode_init(AVCodecContext *avctx) +void ff_mpeg12_common_init(MpegEncContext *s) { - MpegEncContext *s = avctx->priv_data; - if(MPV_encode_init(avctx) < 0) - return -1; - - if(find_frame_rate_index(s) < 0){ - if(s->strict_std_compliance >=0){ - av_log(avctx, AV_LOG_ERROR, "MPEG1/2 doesnt support %d/%d fps\n", avctx->frame_rate, avctx->frame_rate_base); - return -1; - }else{ - av_log(avctx, AV_LOG_INFO, "MPEG1/2 doesnt support %d/%d fps, there may be AV sync issues\n", avctx->frame_rate, avctx->frame_rate_base); - } - } - - return 0; -} - -static void put_header(MpegEncContext *s, int header) -{ - align_put_bits(&s->pb); - put_bits(&s->pb, 16, header>>16); - put_bits(&s->pb, 16, header&0xFFFF); -} - -/* put sequence header if needed */ -static void mpeg1_encode_sequence_header(MpegEncContext *s) -{ - unsigned int vbv_buffer_size; - unsigned int fps, v; - int n, i; - uint64_t time_code; - float best_aspect_error= 1E10; - float aspect_ratio= av_q2d(s->avctx->sample_aspect_ratio); - int constraint_parameter_flag; - - if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA) - - if (s->current_picture.key_frame) { - /* mpeg1 header repeated every gop */ - put_header(s, SEQ_START_CODE); - - put_bits(&s->pb, 12, s->width); - put_bits(&s->pb, 12, s->height); - - for(i=1; i<15; i++){ - float error= aspect_ratio; - if(s->codec_id == CODEC_ID_MPEG1VIDEO || i <=1) - error-= 1.0/mpeg1_aspect[i]; - else - error-= av_q2d(mpeg2_aspect[i])*s->height/s->width; - - error= ABS(error); - - if(error < best_aspect_error){ - best_aspect_error= error; - s->aspect_ratio_info= i; - } - } - - put_bits(&s->pb, 4, s->aspect_ratio_info); - put_bits(&s->pb, 4, s->frame_rate_index); - - if(s->avctx->rc_max_rate){ - v = (s->avctx->rc_max_rate + 399) / 400; - if (v > 0x3ffff && s->codec_id == CODEC_ID_MPEG1VIDEO) - v = 0x3ffff; - }else{ - v= 0x3FFFF; - } - - if(s->avctx->rc_buffer_size) - vbv_buffer_size = s->avctx->rc_buffer_size; - else - 
/* VBV calculation: Scaled so that a VCD has the proper VBV size of 40 kilobytes */ - vbv_buffer_size = (( 20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024; - vbv_buffer_size= (vbv_buffer_size + 16383) / 16384; - - put_bits(&s->pb, 18, v & 0x3FFFF); - put_bits(&s->pb, 1, 1); /* marker */ - put_bits(&s->pb, 10, vbv_buffer_size & 0x3FF); - - constraint_parameter_flag= - s->width <= 768 && s->height <= 576 && - s->mb_width * s->mb_height <= 396 && - s->mb_width * s->mb_height * frame_rate_tab[s->frame_rate_index] <= MPEG1_FRAME_RATE_BASE*396*25 && - frame_rate_tab[s->frame_rate_index] <= MPEG1_FRAME_RATE_BASE*30 && - vbv_buffer_size <= 20 && - v <= 1856000/400 && - s->codec_id == CODEC_ID_MPEG1VIDEO; - - put_bits(&s->pb, 1, constraint_parameter_flag); - - ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix); - ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix); - - if(s->codec_id == CODEC_ID_MPEG2VIDEO){ - put_header(s, EXT_START_CODE); - put_bits(&s->pb, 4, 1); //seq ext - put_bits(&s->pb, 1, 0); //esc - put_bits(&s->pb, 3, 4); //profile - put_bits(&s->pb, 4, 8); //level - put_bits(&s->pb, 1, s->progressive_sequence); - put_bits(&s->pb, 2, 1); //chroma format 4:2:0 - put_bits(&s->pb, 2, 0); //horizontal size ext - put_bits(&s->pb, 2, 0); //vertical size ext - put_bits(&s->pb, 12, v>>18); //bitrate ext - put_bits(&s->pb, 1, 1); //marker - put_bits(&s->pb, 8, vbv_buffer_size >>10); //vbv buffer ext - put_bits(&s->pb, 1, s->low_delay); - put_bits(&s->pb, 2, 0); // frame_rate_ext_n - put_bits(&s->pb, 5, 0); // frame_rate_ext_d - } - - put_header(s, GOP_START_CODE); - put_bits(&s->pb, 1, 0); /* do drop frame */ - /* time code : we must convert from the real frame rate to a - fake mpeg frame rate in case of low frame rate */ - fps = (frame_rate_tab[s->frame_rate_index] + MPEG1_FRAME_RATE_BASE/2)/ MPEG1_FRAME_RATE_BASE; - time_code = s->current_picture_ptr->coded_picture_number; - - s->gop_picture_number = time_code; - put_bits(&s->pb, 5, (uint32_t)((time_code / (fps * 3600)) % 24)); - put_bits(&s->pb, 6, (uint32_t)((time_code / (fps * 60)) % 60)); - put_bits(&s->pb, 1, 1); - put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60)); - put_bits(&s->pb, 6, (uint32_t)((time_code % fps))); - put_bits(&s->pb, 1, !!(s->flags & CODEC_FLAG_CLOSED_GOP)); - put_bits(&s->pb, 1, 0); /* broken link */ - } -} - -static inline void encode_mb_skip_run(MpegEncContext *s, int run){ - while (run >= 33) { - put_bits(&s->pb, 11, 0x008); - run -= 33; - } - put_bits(&s->pb, mbAddrIncrTable[run][1], - mbAddrIncrTable[run][0]); -} -#endif //CONFIG_ENCODERS - -static void common_init(MpegEncContext *s) -{ s->y_dc_scale_table= - s->c_dc_scale_table= ff_mpeg1_dc_scale_table; + s->c_dc_scale_table= mpeg2_dc_scale_table[s->intra_dc_precision]; + } void ff_mpeg1_clean_buffers(MpegEncContext *s){ @@ -358,623 +138,56 @@ void ff_mpeg1_clean_buffers(MpegEncContext *s){ memset(s->last_mv, 0, sizeof(s->last_mv)); } -#ifdef CONFIG_ENCODERS - -void ff_mpeg1_encode_slice_header(MpegEncContext *s){ - put_header(s, SLICE_MIN_START_CODE + s->mb_y); - put_bits(&s->pb, 5, s->qscale); /* quantizer scale */ - put_bits(&s->pb, 1, 0); /* slice extra information */ -} - -void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number) -{ - mpeg1_encode_sequence_header(s); - - /* mpeg1 picture header */ - put_header(s, PICTURE_START_CODE); - /* temporal reference */ - - // RAL: s->picture_number instead of s->fake_picture_number - put_bits(&s->pb, 10, (s->picture_number - - s->gop_picture_number) & 0x3ff); - put_bits(&s->pb, 3, 
s->pict_type); - - s->vbv_delay_ptr= s->pb.buf + get_bit_count(&s->pb)/8; - put_bits(&s->pb, 16, 0xFFFF); /* vbv_delay */ - - // RAL: Forward f_code also needed for B frames - if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) { - put_bits(&s->pb, 1, 0); /* half pel coordinates */ - if(s->codec_id == CODEC_ID_MPEG1VIDEO) - put_bits(&s->pb, 3, s->f_code); /* forward_f_code */ - else - put_bits(&s->pb, 3, 7); /* forward_f_code */ - } - - // RAL: Backward f_code necessary for B frames - if (s->pict_type == B_TYPE) { - put_bits(&s->pb, 1, 0); /* half pel coordinates */ - if(s->codec_id == CODEC_ID_MPEG1VIDEO) - put_bits(&s->pb, 3, s->b_code); /* backward_f_code */ - else - put_bits(&s->pb, 3, 7); /* backward_f_code */ - } - - put_bits(&s->pb, 1, 0); /* extra bit picture */ - - s->frame_pred_frame_dct = 1; - if(s->codec_id == CODEC_ID_MPEG2VIDEO){ - put_header(s, EXT_START_CODE); - put_bits(&s->pb, 4, 8); //pic ext - if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) { - put_bits(&s->pb, 4, s->f_code); - put_bits(&s->pb, 4, s->f_code); - }else{ - put_bits(&s->pb, 8, 255); - } - if (s->pict_type == B_TYPE) { - put_bits(&s->pb, 4, s->b_code); - put_bits(&s->pb, 4, s->b_code); - }else{ - put_bits(&s->pb, 8, 255); - } - put_bits(&s->pb, 2, s->intra_dc_precision); - put_bits(&s->pb, 2, s->picture_structure= PICT_FRAME); - if (s->progressive_sequence) { - put_bits(&s->pb, 1, 0); /* no repeat */ - } else { - put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first); - } - /* XXX: optimize the generation of this flag with entropy - measures */ - s->frame_pred_frame_dct = s->progressive_sequence; - - put_bits(&s->pb, 1, s->frame_pred_frame_dct); - put_bits(&s->pb, 1, s->concealment_motion_vectors); - put_bits(&s->pb, 1, s->q_scale_type); - put_bits(&s->pb, 1, s->intra_vlc_format); - put_bits(&s->pb, 1, s->alternate_scan); - put_bits(&s->pb, 1, s->repeat_first_field); - put_bits(&s->pb, 1, s->chroma_420_type=1); - s->progressive_frame = s->progressive_sequence; - put_bits(&s->pb, 1, s->progressive_frame); - put_bits(&s->pb, 1, 0); //composite_display_flag - } - if(s->flags & CODEC_FLAG_SVCD_SCAN_OFFSET){ - int i; - - put_header(s, USER_START_CODE); - for(i=0; ipb, 8, svcd_scan_offset_placeholder[i]); - } - } - - s->mb_y=0; - ff_mpeg1_encode_slice_header(s); -} - -static inline void put_mb_modes(MpegEncContext *s, int n, int bits, - int has_mv, int field_motion) -{ - put_bits(&s->pb, n, bits); - if (!s->frame_pred_frame_dct) { - if (has_mv) - put_bits(&s->pb, 2, 2 - field_motion); /* motion_type: frame/field */ - put_bits(&s->pb, 1, s->interlaced_dct); - } -} - -void mpeg1_encode_mb(MpegEncContext *s, - DCTELEM block[6][64], - int motion_x, int motion_y) -{ - int i, cbp; - const int mb_x = s->mb_x; - const int mb_y = s->mb_y; - const int first_mb= mb_x == s->resync_mb_x && mb_y == s->resync_mb_y; - - /* compute cbp */ - cbp = 0; - for(i=0;i<6;i++) { - if (s->block_last_index[i] >= 0) - cbp |= 1 << (5 - i); - } - - if (cbp == 0 && !first_mb && (mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) && - ((s->pict_type == P_TYPE && s->mv_type == MV_TYPE_16X16 && (motion_x | motion_y) == 0) || - (s->pict_type == B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) | - ((s->mv_dir & MV_DIR_BACKWARD) ? 
((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) { - s->mb_skip_run++; - s->qscale -= s->dquant; - s->skip_count++; - s->misc_bits++; - s->last_bits++; - if(s->pict_type == P_TYPE){ - s->last_mv[0][1][0]= s->last_mv[0][0][0]= - s->last_mv[0][1][1]= s->last_mv[0][0][1]= 0; - } - } else { - if(first_mb){ - assert(s->mb_skip_run == 0); - encode_mb_skip_run(s, s->mb_x); - }else{ - encode_mb_skip_run(s, s->mb_skip_run); - } - - if (s->pict_type == I_TYPE) { - if(s->dquant && cbp){ - put_mb_modes(s, 2, 1, 0, 0); /* macroblock_type : macroblock_quant = 1 */ - put_bits(&s->pb, 5, s->qscale); - }else{ - put_mb_modes(s, 1, 1, 0, 0); /* macroblock_type : macroblock_quant = 0 */ - s->qscale -= s->dquant; - } - s->misc_bits+= get_bits_diff(s); - s->i_count++; - } else if (s->mb_intra) { - if(s->dquant && cbp){ - put_mb_modes(s, 6, 0x01, 0, 0); - put_bits(&s->pb, 5, s->qscale); - }else{ - put_mb_modes(s, 5, 0x03, 0, 0); - s->qscale -= s->dquant; - } - s->misc_bits+= get_bits_diff(s); - s->i_count++; - memset(s->last_mv, 0, sizeof(s->last_mv)); - } else if (s->pict_type == P_TYPE) { - if(s->mv_type == MV_TYPE_16X16){ - if (cbp != 0) { - if ((motion_x|motion_y) == 0) { - if(s->dquant){ - put_mb_modes(s, 5, 1, 0, 0); /* macroblock_pattern & quant */ - put_bits(&s->pb, 5, s->qscale); - }else{ - put_mb_modes(s, 2, 1, 0, 0); /* macroblock_pattern only */ - } - s->misc_bits+= get_bits_diff(s); - } else { - if(s->dquant){ - put_mb_modes(s, 5, 2, 1, 0); /* motion + cbp */ - put_bits(&s->pb, 5, s->qscale); - }else{ - put_mb_modes(s, 1, 1, 1, 0); /* motion + cbp */ - } - s->misc_bits+= get_bits_diff(s); - mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added - mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added - s->mv_bits+= get_bits_diff(s); - } - } else { - put_bits(&s->pb, 3, 1); /* motion only */ - if (!s->frame_pred_frame_dct) - put_bits(&s->pb, 2, 2); /* motion_type: frame */ - s->misc_bits+= get_bits_diff(s); - mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added - mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added - s->qscale -= s->dquant; - s->mv_bits+= get_bits_diff(s); - } - s->last_mv[0][1][0]= s->last_mv[0][0][0]= motion_x; - s->last_mv[0][1][1]= s->last_mv[0][0][1]= motion_y; - }else{ - assert(!s->frame_pred_frame_dct && s->mv_type == MV_TYPE_FIELD); - - if (cbp) { - if(s->dquant){ - put_mb_modes(s, 5, 2, 1, 1); /* motion + cbp */ - put_bits(&s->pb, 5, s->qscale); - }else{ - put_mb_modes(s, 1, 1, 1, 1); /* motion + cbp */ - } - } else { - put_bits(&s->pb, 3, 1); /* motion only */ - put_bits(&s->pb, 2, 1); /* motion_type: field */ - s->qscale -= s->dquant; - } - s->misc_bits+= get_bits_diff(s); - for(i=0; i<2; i++){ - put_bits(&s->pb, 1, s->field_select[0][i]); - mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code); - mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code); - s->last_mv[0][i][0]= s->mv[0][i][0]; - s->last_mv[0][i][1]= 2*s->mv[0][i][1]; - } - s->mv_bits+= get_bits_diff(s); - } - if(cbp) - put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]); - s->f_count++; - } else{ - static const int mb_type_len[4]={0,3,4,2}; //bak,for,bi - - if(s->mv_type == MV_TYPE_16X16){ - if (cbp){ // With coded bloc pattern - if (s->dquant) { - if(s->mv_dir == MV_DIR_FORWARD) - put_mb_modes(s, 6, 3, 1, 0); - else - put_mb_modes(s, 
mb_type_len[s->mv_dir]+3, 2, 1, 0); - put_bits(&s->pb, 5, s->qscale); - } else { - put_mb_modes(s, mb_type_len[s->mv_dir], 3, 1, 0); - } - }else{ // No coded bloc pattern - put_bits(&s->pb, mb_type_len[s->mv_dir], 2); - if (!s->frame_pred_frame_dct) - put_bits(&s->pb, 2, 2); /* motion_type: frame */ - s->qscale -= s->dquant; - } - s->misc_bits += get_bits_diff(s); - if (s->mv_dir&MV_DIR_FORWARD){ - mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code); - mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code); - s->last_mv[0][0][0]=s->last_mv[0][1][0]= s->mv[0][0][0]; - s->last_mv[0][0][1]=s->last_mv[0][1][1]= s->mv[0][0][1]; - s->f_count++; - } - if (s->mv_dir&MV_DIR_BACKWARD){ - mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code); - mpeg1_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code); - s->last_mv[1][0][0]=s->last_mv[1][1][0]= s->mv[1][0][0]; - s->last_mv[1][0][1]=s->last_mv[1][1][1]= s->mv[1][0][1]; - s->b_count++; - } - }else{ - assert(s->mv_type == MV_TYPE_FIELD); - assert(!s->frame_pred_frame_dct); - if (cbp){ // With coded bloc pattern - if (s->dquant) { - if(s->mv_dir == MV_DIR_FORWARD) - put_mb_modes(s, 6, 3, 1, 1); - else - put_mb_modes(s, mb_type_len[s->mv_dir]+3, 2, 1, 1); - put_bits(&s->pb, 5, s->qscale); - } else { - put_mb_modes(s, mb_type_len[s->mv_dir], 3, 1, 1); - } - }else{ // No coded bloc pattern - put_bits(&s->pb, mb_type_len[s->mv_dir], 2); - put_bits(&s->pb, 2, 1); /* motion_type: field */ - s->qscale -= s->dquant; - } - s->misc_bits += get_bits_diff(s); - if (s->mv_dir&MV_DIR_FORWARD){ - for(i=0; i<2; i++){ - put_bits(&s->pb, 1, s->field_select[0][i]); - mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code); - mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code); - s->last_mv[0][i][0]= s->mv[0][i][0]; - s->last_mv[0][i][1]= 2*s->mv[0][i][1]; - } - s->f_count++; - } - if (s->mv_dir&MV_DIR_BACKWARD){ - for(i=0; i<2; i++){ - put_bits(&s->pb, 1, s->field_select[1][i]); - mpeg1_encode_motion(s, s->mv[1][i][0] - s->last_mv[1][i][0] , s->b_code); - mpeg1_encode_motion(s, s->mv[1][i][1] - (s->last_mv[1][i][1]>>1), s->b_code); - s->last_mv[1][i][0]= s->mv[1][i][0]; - s->last_mv[1][i][1]= 2*s->mv[1][i][1]; - } - s->b_count++; - } - } - s->mv_bits += get_bits_diff(s); - if(cbp) - put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]); - } - for(i=0;i<6;i++) { - if (cbp & (1 << (5 - i))) { - mpeg1_encode_block(s, block[i], i); - } - } - s->mb_skip_run = 0; - if(s->mb_intra) - s->i_tex_bits+= get_bits_diff(s); - else - s->p_tex_bits+= get_bits_diff(s); - } -} - -// RAL: Parameter added: f_or_b_code -static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code) -{ - int code, bit_size, l, m, bits, range, sign; - - if (val == 0) { - /* zero vector */ - code = 0; - put_bits(&s->pb, - mbMotionVectorTable[0][1], - mbMotionVectorTable[0][0]); - } else { - bit_size = f_or_b_code - 1; - range = 1 << bit_size; - /* modulo encoding */ - l = 16 * range; - m = 2 * l; - if (val < -l) { - val += m; - } else if (val >= l) { - val -= m; - } - - if (val >= 0) { - val--; - code = (val >> bit_size) + 1; - bits = val & (range - 1); - sign = 0; - } else { - val = -val; - val--; - code = (val >> bit_size) + 1; - bits = val & (range - 1); - sign = 1; - } - - assert(code > 0 && code <= 16); - - put_bits(&s->pb, - mbMotionVectorTable[code][1], - mbMotionVectorTable[code][0]); - - put_bits(&s->pb, 1, sign); - if (bit_size > 0) { - put_bits(&s->pb, bit_size, bits); 
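/*
 * [Editor's note -- illustrative sketch, not part of this patch.]
 * The removed mpeg1_encode_motion() above and the surviving
 * mpeg_decode_motion() further down rely on the same modulo rule: for a
 * given f_code the transmittable motion-vector residual lies in
 * [-16 << (f_code - 1), 16 << (f_code - 1)), and values outside that
 * window wrap around.  A minimal standalone model of the wrap, assuming
 * nothing beyond the f_code parameter itself:
 */
static int mpeg1_wrap_mv_residual(int val, int f_code)
{
    int bit_size = f_code - 1;
    int range    = 1 << bit_size;   /* 2^(f_code-1) extra residual values */
    int l        = 16 * range;      /* half of the representable span     */
    int m        = 2 * l;           /* full span, used for wrapping       */

    if (val < -l)
        val += m;
    else if (val >= l)
        val -= m;
    return val;                     /* now guaranteed to lie in [-l, l)   */
}
/*
 * The decoder below expresses the same wrap as a sign extension:
 * val = (val << l) >> l with l = INT_BIT - 5 - (f_code - 1).
 */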
- } - } -} - -void ff_mpeg1_encode_init(MpegEncContext *s) -{ - static int done=0; - - common_init(s); - - if(!done){ - int f_code; - int mv; - int i; - - done=1; - init_rl(&rl_mpeg1); - - for(i=0; i<64; i++) - { - mpeg1_max_level[0][i]= rl_mpeg1.max_level[0][i]; - mpeg1_index_run[0][i]= rl_mpeg1.index_run[0][i]; - } - - init_uni_ac_vlc(&rl_mpeg1, uni_mpeg1_ac_vlc_bits, uni_mpeg1_ac_vlc_len); - - /* build unified dc encoding tables */ - for(i=-255; i<256; i++) - { - int adiff, index; - int bits, code; - int diff=i; - - adiff = ABS(diff); - if(diff<0) diff--; - index = vlc_dc_table[adiff]; - - bits= vlc_dc_lum_bits[index] + index; - code= (vlc_dc_lum_code[index]<f_code - 1; - range = 1 << bit_size; - - val=mv; - if (val < 0) - val = -val; - val--; - code = (val >> bit_size) + 1; - if(code<17){ - len= mbMotionVectorTable[code][1] + 1 + bit_size; - }else{ - len= mbMotionVectorTable[16][1] + 2 + bit_size; - } - } - - mv_penalty[f_code][mv+MAX_MV]= len; - } - } - - - for(f_code=MAX_FCODE; f_code>0; f_code--){ - for(mv=-(8<me.mv_penalty= mv_penalty; - s->fcode_tab= fcode_tab; - if(s->codec_id == CODEC_ID_MPEG1VIDEO){ - s->min_qcoeff=-255; - s->max_qcoeff= 255; - }else{ - s->min_qcoeff=-2047; - s->max_qcoeff= 2047; - } - s->intra_ac_vlc_length= - s->inter_ac_vlc_length= - s->intra_ac_vlc_last_length= - s->inter_ac_vlc_last_length= uni_mpeg1_ac_vlc_len; -} - -static inline void encode_dc(MpegEncContext *s, int diff, int component) -{ - if (component == 0) { - put_bits( - &s->pb, - mpeg1_lum_dc_uni[diff+255]&0xFF, - mpeg1_lum_dc_uni[diff+255]>>8); - } else { - put_bits( - &s->pb, - mpeg1_chr_dc_uni[diff+255]&0xFF, - mpeg1_chr_dc_uni[diff+255]>>8); - } -} - -static void mpeg1_encode_block(MpegEncContext *s, - DCTELEM *block, - int n) -{ - int alevel, level, last_non_zero, dc, diff, i, j, run, last_index, sign; - int code, component; -// RLTable *rl = &rl_mpeg1; - - last_index = s->block_last_index[n]; - - /* DC coef */ - if (s->mb_intra) { - component = (n <= 3 ? 
0 : n - 4 + 1); - dc = block[0]; /* overflow is impossible */ - diff = dc - s->last_dc[component]; - encode_dc(s, diff, component); - s->last_dc[component] = dc; - i = 1; -/* - if (s->intra_vlc_format) - rl = &rl_mpeg2; - else - rl = &rl_mpeg1; -*/ - } else { - /* encode the first coefficient : needs to be done here because - it is handled slightly differently */ - level = block[0]; - if (abs(level) == 1) { - code = ((uint32_t)level >> 31); /* the sign bit */ - put_bits(&s->pb, 2, code | 0x02); - i = 1; - } else { - i = 0; - last_non_zero = -1; - goto next_coef; - } - } - - /* now quantify & encode AC coefs */ - last_non_zero = i - 1; - - for(;i<=last_index;i++) { - j = s->intra_scantable.permutated[i]; - level = block[j]; - next_coef: -#if 0 - if (level != 0) - dprintf("level[%d]=%d\n", i, level); -#endif - /* encode using VLC */ - if (level != 0) { - run = i - last_non_zero - 1; - - alevel= level; - MASK_ABS(sign, alevel) - sign&=1; - -// code = get_rl_index(rl, 0, run, alevel); - if (alevel <= mpeg1_max_level[0][run]){ - code= mpeg1_index_run[0][run] + alevel - 1; - /* store the vlc & sign at once */ - put_bits(&s->pb, mpeg1_vlc[code][1]+1, (mpeg1_vlc[code][0]<<1) + sign); - } else { - /* escape seems to be pretty rare <5% so i dont optimize it */ - put_bits(&s->pb, mpeg1_vlc[111/*rl->n*/][1], mpeg1_vlc[111/*rl->n*/][0]); - /* escape: only clip in this case */ - put_bits(&s->pb, 6, run); - if(s->codec_id == CODEC_ID_MPEG1VIDEO){ - if (alevel < 128) { - put_bits(&s->pb, 8, level & 0xff); - } else { - if (level < 0) { - put_bits(&s->pb, 16, 0x8001 + level + 255); - } else { - put_bits(&s->pb, 16, level & 0xffff); - } - } - }else{ - put_bits(&s->pb, 12, level & 0xfff); - } - } - last_non_zero = i; - } - } - /* end of block */ - put_bits(&s->pb, 2, 0x2); -} -#endif //CONFIG_ENCODERS /******************************************/ /* decoding */ -static VLC dc_lum_vlc; -static VLC dc_chroma_vlc; static VLC mv_vlc; static VLC mbincr_vlc; static VLC mb_ptype_vlc; static VLC mb_btype_vlc; static VLC mb_pat_vlc; -static void init_vlcs() +av_cold void ff_mpeg12_init_vlcs(void) { static int done = 0; if (!done) { done = 1; - init_vlc(&dc_lum_vlc, DC_VLC_BITS, 12, - vlc_dc_lum_bits, 1, 1, - vlc_dc_lum_code, 2, 2); - init_vlc(&dc_chroma_vlc, DC_VLC_BITS, 12, - vlc_dc_chroma_bits, 1, 1, - vlc_dc_chroma_code, 2, 2); - init_vlc(&mv_vlc, MV_VLC_BITS, 17, - &mbMotionVectorTable[0][1], 2, 1, - &mbMotionVectorTable[0][0], 2, 1); - init_vlc(&mbincr_vlc, MBINCR_VLC_BITS, 36, - &mbAddrIncrTable[0][1], 2, 1, - &mbAddrIncrTable[0][0], 2, 1); - init_vlc(&mb_pat_vlc, MB_PAT_VLC_BITS, 63, - &mbPatTable[0][1], 2, 1, - &mbPatTable[0][0], 2, 1); - - init_vlc(&mb_ptype_vlc, MB_PTYPE_VLC_BITS, 7, - &table_mb_ptype[0][1], 2, 1, - &table_mb_ptype[0][0], 2, 1); - init_vlc(&mb_btype_vlc, MB_BTYPE_VLC_BITS, 11, - &table_mb_btype[0][1], 2, 1, - &table_mb_btype[0][0], 2, 1); - init_rl(&rl_mpeg1); - init_rl(&rl_mpeg2); + INIT_VLC_STATIC(&dc_lum_vlc, DC_VLC_BITS, 12, + ff_mpeg12_vlc_dc_lum_bits, 1, 1, + ff_mpeg12_vlc_dc_lum_code, 2, 2, 512); + INIT_VLC_STATIC(&dc_chroma_vlc, DC_VLC_BITS, 12, + ff_mpeg12_vlc_dc_chroma_bits, 1, 1, + ff_mpeg12_vlc_dc_chroma_code, 2, 2, 514); + INIT_VLC_STATIC(&mv_vlc, MV_VLC_BITS, 17, + &ff_mpeg12_mbMotionVectorTable[0][1], 2, 1, + &ff_mpeg12_mbMotionVectorTable[0][0], 2, 1, 518); + INIT_VLC_STATIC(&mbincr_vlc, MBINCR_VLC_BITS, 36, + &ff_mpeg12_mbAddrIncrTable[0][1], 2, 1, + &ff_mpeg12_mbAddrIncrTable[0][0], 2, 1, 538); + INIT_VLC_STATIC(&mb_pat_vlc, MB_PAT_VLC_BITS, 64, + 
&ff_mpeg12_mbPatTable[0][1], 2, 1, + &ff_mpeg12_mbPatTable[0][0], 2, 1, 512); - init_2d_vlc_rl(&rl_mpeg1); - init_2d_vlc_rl(&rl_mpeg2); + INIT_VLC_STATIC(&mb_ptype_vlc, MB_PTYPE_VLC_BITS, 7, + &table_mb_ptype[0][1], 2, 1, + &table_mb_ptype[0][0], 2, 1, 64); + INIT_VLC_STATIC(&mb_btype_vlc, MB_BTYPE_VLC_BITS, 11, + &table_mb_btype[0][1], 2, 1, + &table_mb_btype[0][0], 2, 1, 64); + init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]); + init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]); + + INIT_2D_VLC_RL(ff_rl_mpeg1, 680); + INIT_2D_VLC_RL(ff_rl_mpeg2, 674); } } static inline int get_dmv(MpegEncContext *s) { - if(get_bits1(&s->gb)) + if(get_bits1(&s->gb)) return 1 - (get_bits1(&s->gb) << 1); else return 0; @@ -990,53 +203,42 @@ static inline int get_qscale(MpegEncContext *s) } } -/* motion type (for mpeg2) */ +/* motion type (for MPEG-2) */ #define MT_FIELD 1 #define MT_FRAME 2 #define MT_16X8 2 #define MT_DMV 3 static int mpeg_decode_mb(MpegEncContext *s, - DCTELEM block[6][64]) + DCTELEM block[12][64]) { int i, j, k, cbp, val, mb_type, motion_type; - - dprintf("decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y); + const int mb_block_count = 4 + (1<< s->chroma_format); - assert(s->mb_skiped==0); + dprintf(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y); + + assert(s->mb_skipped==0); if (s->mb_skip_run-- != 0) { - if(s->pict_type == I_TYPE){ - av_log(s->avctx, AV_LOG_ERROR, "skiped MB in I frame at %d %d\n", s->mb_x, s->mb_y); - return -1; - } - - /* skip mb */ - s->mb_intra = 0; - for(i=0;i<6;i++) - s->block_last_index[i] = -1; - s->mv_type = MV_TYPE_16X16; - if (s->pict_type == P_TYPE) { - /* if P type, zero motion vector is implied */ - s->mv_dir = MV_DIR_FORWARD; - s->mv[0][0][0] = s->mv[0][0][1] = 0; - s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0; - s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0; - s->mb_skiped = 1; + if (s->pict_type == FF_P_TYPE) { + s->mb_skipped = 1; s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16; } else { - /* if B type, reuse previous vectors and directions */ - s->mv[0][0][0] = s->last_mv[0][0][0]; - s->mv[0][0][1] = s->last_mv[0][0][1]; - s->mv[1][0][0] = s->last_mv[1][0][0]; - s->mv[1][0][1] = s->last_mv[1][0][1]; + int mb_type; - s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= - s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1] | MB_TYPE_SKIP; + if(s->mb_x) + mb_type= s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]; + else + mb_type= s->current_picture.mb_type[ s->mb_width + (s->mb_y-1)*s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all + if(IS_INTRA(mb_type)) + return -1; + + s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= + mb_type | MB_TYPE_SKIP; // assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8)); - if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0) - s->mb_skiped = 1; + if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0) + s->mb_skipped = 1; } return 0; @@ -1044,7 +246,7 @@ static int mpeg_decode_mb(MpegEncContext *s, switch(s->pict_type) { default: - case I_TYPE: + case FF_I_TYPE: if (get_bits1(&s->gb) == 0) { if (get_bits1(&s->gb) == 0){ av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in I Frame at %d %d\n", s->mb_x, s->mb_y); @@ -1055,7 +257,7 @@ static int mpeg_decode_mb(MpegEncContext *s, mb_type = MB_TYPE_INTRA; } break; - case P_TYPE: + case FF_P_TYPE: mb_type = get_vlc2(&s->gb, mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 
1); if (mb_type < 0){ av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in P Frame at %d %d\n", s->mb_x, s->mb_y); @@ -1063,7 +265,7 @@ static int mpeg_decode_mb(MpegEncContext *s, } mb_type = ptype2mb_type[ mb_type ]; break; - case B_TYPE: + case FF_B_TYPE: mb_type = get_vlc2(&s->gb, mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1); if (mb_type < 0){ av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in B Frame at %d %d\n", s->mb_x, s->mb_y); @@ -1072,26 +274,32 @@ static int mpeg_decode_mb(MpegEncContext *s, mb_type = btype2mb_type[ mb_type ]; break; } - dprintf("mb_type=%x\n", mb_type); + dprintf(s->avctx, "mb_type=%x\n", mb_type); // motion_type = 0; /* avoid warning */ if (IS_INTRA(mb_type)) { - /* compute dct type */ - if (s->picture_structure == PICT_FRAME && //FIXME add a interlaced_dct coded var? + s->dsp.clear_blocks(s->block[0]); + + if(!s->chroma_y_shift){ + s->dsp.clear_blocks(s->block[6]); + } + + /* compute DCT type */ + if (s->picture_structure == PICT_FRAME && //FIXME add an interlaced_dct coded var? !s->frame_pred_frame_dct) { s->interlaced_dct = get_bits1(&s->gb); } if (IS_QUANT(mb_type)) s->qscale = get_qscale(s); - + if (s->concealment_motion_vectors) { /* just parse them */ - if (s->picture_structure != PICT_FRAME) + if (s->picture_structure != PICT_FRAME) skip_bits1(&s->gb); /* field select */ - - s->mv[0][0][0]= s->last_mv[0][0][0]= s->last_mv[0][1][0] = + + s->mv[0][0][0]= s->last_mv[0][0][0]= s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0], s->last_mv[0][0][0]); - s->mv[0][0][1]= s->last_mv[0][0][1]= s->last_mv[0][1][1] = + s->mv[0][0][1]= s->last_mv[0][0][1]= s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1], s->last_mv[0][0][1]); skip_bits1(&s->gb); /* marker */ @@ -1099,7 +307,7 @@ static int mpeg_decode_mb(MpegEncContext *s, memset(s->last_mv, 0, sizeof(s->last_mv)); /* reset mv prediction */ s->mb_intra = 1; #ifdef HAVE_XVMC - //one 1 we memcpy blocks in xvmcvideo + //if 1, we memcpy blocks in xvmcvideo if(s->avctx->xvmc_acceleration > 1){ XVMC_pack_pblocks(s,-1);//inter are always full blocks if(s->swap_uv){ @@ -1109,9 +317,15 @@ static int mpeg_decode_mb(MpegEncContext *s, #endif if (s->codec_id == CODEC_ID_MPEG2VIDEO) { - for(i=0;i<6;i++) { - if (mpeg2_decode_block_intra(s, s->pblocks[i], i) < 0) - return -1; + if(s->flags2 & CODEC_FLAG2_FAST){ + for(i=0;i<6;i++) { + mpeg2_fast_decode_block_intra(s, s->pblocks[i], i); + } + }else{ + for(i=0;ipblocks[i], i) < 0) + return -1; + } } } else { for(i=0;i<6;i++) { @@ -1123,17 +337,20 @@ static int mpeg_decode_mb(MpegEncContext *s, if (mb_type & MB_TYPE_ZERO_MV){ assert(mb_type & MB_TYPE_CBP); - /* compute dct type */ - if (s->picture_structure == PICT_FRAME && //FIXME add a interlaced_dct coded var? 
- !s->frame_pred_frame_dct) { - s->interlaced_dct = get_bits1(&s->gb); + s->mv_dir = MV_DIR_FORWARD; + if(s->picture_structure == PICT_FRAME){ + if(!s->frame_pred_frame_dct) + s->interlaced_dct = get_bits1(&s->gb); + s->mv_type = MV_TYPE_16X16; + }else{ + s->mv_type = MV_TYPE_FIELD; + mb_type |= MB_TYPE_INTERLACED; + s->field_select[0][0]= s->picture_structure - 1; } if (IS_QUANT(mb_type)) s->qscale = get_qscale(s); - s->mv_dir = MV_DIR_FORWARD; - s->mv_type = MV_TYPE_16X16; s->last_mv[0][0][0] = 0; s->last_mv[0][0][1] = 0; s->last_mv[0][1][0] = 0; @@ -1143,47 +360,46 @@ static int mpeg_decode_mb(MpegEncContext *s, }else{ assert(mb_type & MB_TYPE_L0L1); //FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED - /* get additionnal motion vector type */ - if (s->frame_pred_frame_dct) + /* get additional motion vector type */ + if (s->frame_pred_frame_dct) motion_type = MT_FRAME; else{ motion_type = get_bits(&s->gb, 2); - } - - /* compute dct type */ - if (s->picture_structure == PICT_FRAME && //FIXME add a interlaced_dct coded var? - !s->frame_pred_frame_dct && HAS_CBP(mb_type)) { - s->interlaced_dct = get_bits1(&s->gb); + if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type)) + s->interlaced_dct = get_bits1(&s->gb); } if (IS_QUANT(mb_type)) s->qscale = get_qscale(s); /* motion vectors */ - s->mv_dir = 0; - for(i=0;i<2;i++) { - if (USES_LIST(mb_type, i)) { - s->mv_dir |= (MV_DIR_FORWARD >> i); - dprintf("motion_type=%d\n", motion_type); - switch(motion_type) { - case MT_FRAME: /* or MT_16X8 */ - if (s->picture_structure == PICT_FRAME) { + s->mv_dir= (mb_type>>13)&3; + dprintf(s->avctx, "motion_type=%d\n", motion_type); + switch(motion_type) { + case MT_FRAME: /* or MT_16X8 */ + if (s->picture_structure == PICT_FRAME) { + mb_type |= MB_TYPE_16x16; + s->mv_type = MV_TYPE_16X16; + for(i=0;i<2;i++) { + if (USES_LIST(mb_type, i)) { /* MT_FRAME */ - mb_type |= MB_TYPE_16x16; - s->mv_type = MV_TYPE_16X16; - s->mv[i][0][0]= s->last_mv[i][0][0]= s->last_mv[i][1][0] = + s->mv[i][0][0]= s->last_mv[i][0][0]= s->last_mv[i][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[i][0], s->last_mv[i][0][0]); - s->mv[i][0][1]= s->last_mv[i][0][1]= s->last_mv[i][1][1] = + s->mv[i][0][1]= s->last_mv[i][0][1]= s->last_mv[i][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[i][1], s->last_mv[i][0][1]); - /* full_pel: only for mpeg1 */ + /* full_pel: only for MPEG-1 */ if (s->full_pel[i]){ s->mv[i][0][0] <<= 1; s->mv[i][0][1] <<= 1; } - } else { + } + } + } else { + mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED; + s->mv_type = MV_TYPE_16X8; + for(i=0;i<2;i++) { + if (USES_LIST(mb_type, i)) { /* MT_16X8 */ - mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED; - s->mv_type = MV_TYPE_16X8; for(j=0;j<2;j++) { s->field_select[i][j] = get_bits1(&s->gb); for(k=0;k<2;k++) { @@ -1194,26 +410,34 @@ static int mpeg_decode_mb(MpegEncContext *s, } } } - break; - case MT_FIELD: - s->mv_type = MV_TYPE_FIELD; - if (s->picture_structure == PICT_FRAME) { - mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED; + } + } + break; + case MT_FIELD: + s->mv_type = MV_TYPE_FIELD; + if (s->picture_structure == PICT_FRAME) { + mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED; + for(i=0;i<2;i++) { + if (USES_LIST(mb_type, i)) { for(j=0;j<2;j++) { s->field_select[i][j] = get_bits1(&s->gb); val = mpeg_decode_motion(s, s->mpeg_f_code[i][0], s->last_mv[i][j][0]); s->last_mv[i][j][0] = val; s->mv[i][j][0] = val; - dprintf("fmx=%d\n", val); + dprintf(s->avctx, "fmx=%d\n", val); val = mpeg_decode_motion(s, s->mpeg_f_code[i][1], s->last_mv[i][j][1] >> 1); 
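/*
 * [Editor's note -- illustrative sketch, not part of this patch.]
 * For field motion vectors in a frame picture the vertical predictor is
 * stored in frame units in last_mv[], so it is halved before prediction
 * (the ">> 1" just above) and the decoded field value is doubled again
 * when written back (the "<< 1" in the next statement).  A compact model
 * of that round trip, ignoring the f_code modulo wrap that
 * mpeg_decode_motion() also applies:
 */
static int update_field_mv_y(int *last_mv_y_frame_units, int residual)
{
    int pred_field = *last_mv_y_frame_units >> 1;  /* frame units -> field units */
    int val        = pred_field + residual;        /* models mpeg_decode_motion() */
    *last_mv_y_frame_units = val << 1;             /* store back in frame units   */
    return val;                                    /* used as s->mv[i][j][1]      */
}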
s->last_mv[i][j][1] = val << 1; s->mv[i][j][1] = val; - dprintf("fmy=%d\n", val); + dprintf(s->avctx, "fmy=%d\n", val); } - } else { - mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED; + } + } + } else { + mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED; + for(i=0;i<2;i++) { + if (USES_LIST(mb_type, i)) { s->field_select[i][0] = get_bits1(&s->gb); for(k=0;k<2;k++) { val = mpeg_decode_motion(s, s->mpeg_f_code[i][k], @@ -1223,105 +447,135 @@ static int mpeg_decode_mb(MpegEncContext *s, s->mv[i][0][k] = val; } } - break; - case MT_DMV: - { - int dmx, dmy, mx, my, m; - - mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0], - s->last_mv[i][0][0]); - s->last_mv[i][0][0] = mx; - s->last_mv[i][1][0] = mx; - dmx = get_dmv(s); - my = mpeg_decode_motion(s, s->mpeg_f_code[i][1], - s->last_mv[i][0][1] >> 1); - dmy = get_dmv(s); - s->mv_type = MV_TYPE_DMV; - - - s->last_mv[i][0][1] = my<<1; - s->last_mv[i][1][1] = my<<1; - - s->mv[i][0][0] = mx; - s->mv[i][0][1] = my; - s->mv[i][1][0] = mx;//not used - s->mv[i][1][1] = my;//not used - - if (s->picture_structure == PICT_FRAME) { - mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED; - - //m = 1 + 2 * s->top_field_first; - m = s->top_field_first ? 1 : 3; - - /* top -> top pred */ - s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx; - s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1; - m = 4 - m; - s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx; - s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1; - } else { - mb_type |= MB_TYPE_16x16; - - s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx; - s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy; - if(s->picture_structure == PICT_TOP_FIELD) - s->mv[i][2][1]--; - else - s->mv[i][2][1]++; - } - } - break; - default: - av_log(s->avctx, AV_LOG_ERROR, "00 motion_type at %d %d\n", s->mb_x, s->mb_y); - return -1; } } + break; + case MT_DMV: + s->mv_type = MV_TYPE_DMV; + for(i=0;i<2;i++) { + if (USES_LIST(mb_type, i)) { + int dmx, dmy, mx, my, m; + mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0], + s->last_mv[i][0][0]); + s->last_mv[i][0][0] = mx; + s->last_mv[i][1][0] = mx; + dmx = get_dmv(s); + my = mpeg_decode_motion(s, s->mpeg_f_code[i][1], + s->last_mv[i][0][1] >> 1); + dmy = get_dmv(s); + + + s->last_mv[i][0][1] = my<<1; + s->last_mv[i][1][1] = my<<1; + + s->mv[i][0][0] = mx; + s->mv[i][0][1] = my; + s->mv[i][1][0] = mx;//not used + s->mv[i][1][1] = my;//not used + + if (s->picture_structure == PICT_FRAME) { + mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED; + + //m = 1 + 2 * s->top_field_first; + m = s->top_field_first ? 
1 : 3; + + /* top -> top pred */ + s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx; + s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1; + m = 4 - m; + s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx; + s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1; + } else { + mb_type |= MB_TYPE_16x16; + + s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx; + s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy; + if(s->picture_structure == PICT_TOP_FIELD) + s->mv[i][2][1]--; + else + s->mv[i][2][1]++; + } + } + } + break; + default: + av_log(s->avctx, AV_LOG_ERROR, "00 motion_type at %d %d\n", s->mb_x, s->mb_y); + return -1; } } - - s->mb_intra = 0; + s->mb_intra = 0; if (HAS_CBP(mb_type)) { + s->dsp.clear_blocks(s->block[0]); + cbp = get_vlc2(&s->gb, mb_pat_vlc.table, MB_PAT_VLC_BITS, 1); - if (cbp < 0){ + if(mb_block_count > 6){ + cbp<<= mb_block_count-6; + cbp |= get_bits(&s->gb, mb_block_count-6); + s->dsp.clear_blocks(s->block[6]); + } + if (cbp <= 0){ av_log(s->avctx, AV_LOG_ERROR, "invalid cbp at %d %d\n", s->mb_x, s->mb_y); return -1; } - cbp++; #ifdef HAVE_XVMC - //on 1 we memcpy blocks in xvmcvideo + //if 1, we memcpy blocks in xvmcvideo if(s->avctx->xvmc_acceleration > 1){ XVMC_pack_pblocks(s,cbp); if(s->swap_uv){ exchange_uv(s); } - } + } #endif if (s->codec_id == CODEC_ID_MPEG2VIDEO) { - for(i=0;i<6;i++) { - if (cbp & 32) { - if (mpeg2_decode_block_non_intra(s, s->pblocks[i], i) < 0) - return -1; - } else { - s->block_last_index[i] = -1; + if(s->flags2 & CODEC_FLAG2_FAST){ + for(i=0;i<6;i++) { + if(cbp & 32) { + mpeg2_fast_decode_block_non_intra(s, s->pblocks[i], i); + } else { + s->block_last_index[i] = -1; + } + cbp+=cbp; + } + }else{ + cbp<<= 12-mb_block_count; + + for(i=0;ipblocks[i], i) < 0) + return -1; + } else { + s->block_last_index[i] = -1; + } + cbp+=cbp; } - cbp+=cbp; } } else { - for(i=0;i<6;i++) { - if (cbp & 32) { - if (mpeg1_decode_block_inter(s, s->pblocks[i], i) < 0) - return -1; - } else { - s->block_last_index[i] = -1; + if(s->flags2 & CODEC_FLAG2_FAST){ + for(i=0;i<6;i++) { + if (cbp & 32) { + mpeg1_fast_decode_block_inter(s, s->pblocks[i], i); + } else { + s->block_last_index[i] = -1; + } + cbp+=cbp; + } + }else{ + for(i=0;i<6;i++) { + if (cbp & 32) { + if (mpeg1_decode_block_inter(s, s->pblocks[i], i) < 0) + return -1; + } else { + s->block_last_index[i] = -1; + } + cbp+=cbp; } - cbp+=cbp; } } }else{ - for(i=0;i<6;i++) + for(i=0;i<12;i++) s->block_last_index[i] = -1; } } @@ -1331,7 +585,7 @@ static int mpeg_decode_mb(MpegEncContext *s, return 0; } -/* as h263, but only 17 codes */ +/* as H.263, but only 17 codes */ static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred) { int code, sign, val, l, shift; @@ -1355,46 +609,25 @@ static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred) if (sign) val = -val; val += pred; - + /* modulo decoding */ - l = 1 << (shift+4); - val = ((val + l)&(l*2-1)) - l; + l= INT_BIT - 5 - shift; + val = (val<>l; return val; } -static inline int decode_dc(GetBitContext *gb, int component) -{ - int code, diff; - - if (component == 0) { - code = get_vlc2(gb, dc_lum_vlc.table, DC_VLC_BITS, 2); - } else { - code = get_vlc2(gb, dc_chroma_vlc.table, DC_VLC_BITS, 2); - } - if (code < 0){ - av_log(NULL, AV_LOG_ERROR, "invalid dc code at\n"); - return 0xffff; - } - if (code == 0) { - diff = 0; - } else { - diff = get_xbits(gb, code); - } - return diff; -} - -static inline int mpeg1_decode_block_intra(MpegEncContext *s, - DCTELEM *block, +static inline int mpeg1_decode_block_intra(MpegEncContext *s, + DCTELEM *block, int n) 
{ int level, dc, diff, i, j, run; int component; - RLTable *rl = &rl_mpeg1; + RLTable *rl = &ff_rl_mpeg1; uint8_t * const scantable= s->intra_scantable.permutated; const uint16_t *quant_matrix= s->intra_matrix; const int qscale= s->qscale; - /* DC coef */ + /* DC coefficient */ component = (n <= 3 ? 0 : n - 4 + 1); diff = decode_dc(&s->gb, component); if (diff >= 0xffff) @@ -1403,15 +636,15 @@ static inline int mpeg1_decode_block_intra(MpegEncContext *s, dc += diff; s->last_dc[component] = dc; block[0] = dc<<3; - dprintf("dc=%d diff=%d\n", dc, diff); + dprintf(s->avctx, "dc=%d diff=%d\n", dc, diff); i = 0; { - OPEN_READER(re, &s->gb); - /* now quantify & encode AC coefs */ + OPEN_READER(re, &s->gb); + /* now quantify & encode AC coefficients */ for(;;) { UPDATE_CACHE(re, &s->gb); - GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2); - + GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); + if(level == 127){ break; } else if(level != 0) { @@ -1456,56 +689,55 @@ static inline int mpeg1_decode_block_intra(MpegEncContext *s, return 0; } -static inline int mpeg1_decode_block_inter(MpegEncContext *s, - DCTELEM *block, +static inline int mpeg1_decode_block_inter(MpegEncContext *s, + DCTELEM *block, int n) { int level, i, j, run; - RLTable *rl = &rl_mpeg1; + RLTable *rl = &ff_rl_mpeg1; uint8_t * const scantable= s->intra_scantable.permutated; const uint16_t *quant_matrix= s->inter_matrix; const int qscale= s->qscale; { - int v; OPEN_READER(re, &s->gb); i = -1; - /* special case for the first coef. no need to add a second vlc table */ + // special case for first coefficient, no need to add second VLC table UPDATE_CACHE(re, &s->gb); - v= SHOW_UBITS(re, &s->gb, 2); - if (v & 2) { - LAST_SKIP_BITS(re, &s->gb, 2); + if (((int32_t)GET_CACHE(re, &s->gb)) < 0) { level= (3*qscale*quant_matrix[0])>>5; level= (level-1)|1; - if(v&1) + if(GET_CACHE(re, &s->gb)&0x40000000) level= -level; block[0] = level; i++; + SKIP_BITS(re, &s->gb, 2); + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + goto end; } - - /* now quantify & encode AC coefs */ +#if MIN_CACHE_BITS < 19 + UPDATE_CACHE(re, &s->gb); +#endif + /* now quantify & encode AC coefficients */ for(;;) { - UPDATE_CACHE(re, &s->gb); - GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2); - - if(level == 127){ - break; - } else if(level != 0) { + GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); + + if(level != 0) { i += run; j = scantable[i]; level= ((level*2+1)*qscale*quant_matrix[j])>>5; level= (level-1)|1; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); - LAST_SKIP_BITS(re, &s->gb, 1); + SKIP_BITS(re, &s->gb, 1); } else { /* escape */ run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); UPDATE_CACHE(re, &s->gb); level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8); if (level == -128) { - level = SHOW_UBITS(re, &s->gb, 8) - 256; LAST_SKIP_BITS(re, &s->gb, 8); + level = SHOW_UBITS(re, &s->gb, 8) - 256; SKIP_BITS(re, &s->gb, 8); } else if (level == 0) { - level = SHOW_UBITS(re, &s->gb, 8) ; LAST_SKIP_BITS(re, &s->gb, 8); + level = SHOW_UBITS(re, &s->gb, 8) ; SKIP_BITS(re, &s->gb, 8); } i += run; j = scantable[i]; @@ -1525,21 +757,109 @@ static inline int mpeg1_decode_block_inter(MpegEncContext *s, } block[j] = level; +#if MIN_CACHE_BITS < 19 + UPDATE_CACHE(re, &s->gb); +#endif + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + break; +#if MIN_CACHE_BITS >= 19 + UPDATE_CACHE(re, &s->gb); +#endif } +end: + LAST_SKIP_BITS(re, 
&s->gb, 2); CLOSE_READER(re, &s->gb); } s->block_last_index[n] = i; return 0; } -/* Also does unquantization here, since I will never support mpeg2 - encoding */ -static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, - DCTELEM *block, +static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n) +{ + int level, i, j, run; + RLTable *rl = &ff_rl_mpeg1; + uint8_t * const scantable= s->intra_scantable.permutated; + const int qscale= s->qscale; + + { + OPEN_READER(re, &s->gb); + i = -1; + // special case for first coefficient, no need to add second VLC table + UPDATE_CACHE(re, &s->gb); + if (((int32_t)GET_CACHE(re, &s->gb)) < 0) { + level= (3*qscale)>>1; + level= (level-1)|1; + if(GET_CACHE(re, &s->gb)&0x40000000) + level= -level; + block[0] = level; + i++; + SKIP_BITS(re, &s->gb, 2); + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + goto end; + } +#if MIN_CACHE_BITS < 19 + UPDATE_CACHE(re, &s->gb); +#endif + + /* now quantify & encode AC coefficients */ + for(;;) { + GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); + + if(level != 0) { + i += run; + j = scantable[i]; + level= ((level*2+1)*qscale)>>1; + level= (level-1)|1; + level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); + SKIP_BITS(re, &s->gb, 1); + } else { + /* escape */ + run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); + UPDATE_CACHE(re, &s->gb); + level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8); + if (level == -128) { + level = SHOW_UBITS(re, &s->gb, 8) - 256; SKIP_BITS(re, &s->gb, 8); + } else if (level == 0) { + level = SHOW_UBITS(re, &s->gb, 8) ; SKIP_BITS(re, &s->gb, 8); + } + i += run; + j = scantable[i]; + if(level<0){ + level= -level; + level= ((level*2+1)*qscale)>>1; + level= (level-1)|1; + level= -level; + }else{ + level= ((level*2+1)*qscale)>>1; + level= (level-1)|1; + } + } + + block[j] = level; +#if MIN_CACHE_BITS < 19 + UPDATE_CACHE(re, &s->gb); +#endif + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + break; +#if MIN_CACHE_BITS >= 19 + UPDATE_CACHE(re, &s->gb); +#endif + } +end: + LAST_SKIP_BITS(re, &s->gb, 2); + CLOSE_READER(re, &s->gb); + } + s->block_last_index[n] = i; + return 0; +} + + +static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, + DCTELEM *block, int n) { int level, i, j, run; - RLTable *rl = &rl_mpeg1; + RLTable *rl = &ff_rl_mpeg1; uint8_t * const scantable= s->intra_scantable.permutated; const uint16_t *quant_matrix; const int qscale= s->qscale; @@ -1548,7 +868,6 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, mismatch = 1; { - int v; OPEN_READER(re, &s->gb); i = -1; if (n < 4) @@ -1556,32 +875,33 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, else quant_matrix = s->chroma_inter_matrix; - /* special case for the first coef. 
no need to add a second vlc table */ + // special case for first coefficient, no need to add second VLC table UPDATE_CACHE(re, &s->gb); - v= SHOW_UBITS(re, &s->gb, 2); - if (v & 2) { - LAST_SKIP_BITS(re, &s->gb, 2); + if (((int32_t)GET_CACHE(re, &s->gb)) < 0) { level= (3*qscale*quant_matrix[0])>>5; - if(v&1) + if(GET_CACHE(re, &s->gb)&0x40000000) level= -level; block[0] = level; mismatch ^= level; i++; + SKIP_BITS(re, &s->gb, 2); + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + goto end; } +#if MIN_CACHE_BITS < 19 + UPDATE_CACHE(re, &s->gb); +#endif - /* now quantify & encode AC coefs */ + /* now quantify & encode AC coefficients */ for(;;) { - UPDATE_CACHE(re, &s->gb); - GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2); - - if(level == 127){ - break; - } else if(level != 0) { + GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); + + if(level != 0) { i += run; j = scantable[i]; level= ((level*2+1)*qscale*quant_matrix[j])>>5; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); - LAST_SKIP_BITS(re, &s->gb, 1); + SKIP_BITS(re, &s->gb, 1); } else { /* escape */ run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); @@ -1601,20 +921,101 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return -1; } - + mismatch ^= level; block[j] = level; +#if MIN_CACHE_BITS < 19 + UPDATE_CACHE(re, &s->gb); +#endif + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + break; +#if MIN_CACHE_BITS >= 19 + UPDATE_CACHE(re, &s->gb); +#endif } +end: + LAST_SKIP_BITS(re, &s->gb, 2); CLOSE_READER(re, &s->gb); } block[63] ^= (mismatch & 1); - + s->block_last_index[n] = i; return 0; } -static inline int mpeg2_decode_block_intra(MpegEncContext *s, - DCTELEM *block, +static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, + DCTELEM *block, + int n) +{ + int level, i, j, run; + RLTable *rl = &ff_rl_mpeg1; + uint8_t * const scantable= s->intra_scantable.permutated; + const int qscale= s->qscale; + OPEN_READER(re, &s->gb); + i = -1; + + // special case for first coefficient, no need to add second VLC table + UPDATE_CACHE(re, &s->gb); + if (((int32_t)GET_CACHE(re, &s->gb)) < 0) { + level= (3*qscale)>>1; + if(GET_CACHE(re, &s->gb)&0x40000000) + level= -level; + block[0] = level; + i++; + SKIP_BITS(re, &s->gb, 2); + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + goto end; + } +#if MIN_CACHE_BITS < 19 + UPDATE_CACHE(re, &s->gb); +#endif + + /* now quantify & encode AC coefficients */ + for(;;) { + GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); + + if(level != 0) { + i += run; + j = scantable[i]; + level= ((level*2+1)*qscale)>>1; + level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); + SKIP_BITS(re, &s->gb, 1); + } else { + /* escape */ + run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); + UPDATE_CACHE(re, &s->gb); + level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12); + + i += run; + j = scantable[i]; + if(level<0){ + level= ((-level*2+1)*qscale)>>1; + level= -level; + }else{ + level= ((level*2+1)*qscale)>>1; + } + } + + block[j] = level; +#if MIN_CACHE_BITS < 19 + UPDATE_CACHE(re, &s->gb); +#endif + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + break; +#if MIN_CACHE_BITS >=19 + UPDATE_CACHE(re, &s->gb); +#endif + } +end: + LAST_SKIP_BITS(re, &s->gb, 2); + CLOSE_READER(re, &s->gb); + s->block_last_index[n] = i; + return 0; +} + 
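/*
 * [Editor's note -- illustrative sketch, not part of this patch.]
 * The non-intra block decoders above avoid the VLC lookup for the two most
 * frequent events of run-level table B.14 by testing the left-aligned
 * bitstream cache directly:
 *   (int32_t)GET_CACHE(re, &s->gb) <  0           -> next bit is '1', the
 *                                                    short "1s" code for a
 *                                                    +/-1 first coefficient;
 *   (int32_t)GET_CACHE(re, &s->gb) <= 0xBFFFFFFF  -> next two bits are '10',
 *                                                    the End of Block code.
 * A standalone model of the two predicates, assuming the upcoming bits are
 * MSB-aligned in a 32-bit word (which is what GET_CACHE() provides):
 */
#include <stdint.h>

static int cache_first_bit_set(uint32_t cache)      /* code '1s' follows */
{
    return (int32_t)cache < 0;                      /* top bit == 1 */
}

static int cache_at_end_of_block(uint32_t cache)    /* code '10' (EOB) follows */
{
    /* true exactly for caches in [0x80000000, 0xBFFFFFFF], i.e. top bits '10' */
    return (int32_t)cache <= (int32_t)0xBFFFFFFF;
}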
+ +static inline int mpeg2_decode_block_intra(MpegEncContext *s, + DCTELEM *block, int n) { int level, dc, diff, i, j, run; @@ -1625,13 +1026,13 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, const int qscale= s->qscale; int mismatch; - /* DC coef */ + /* DC coefficient */ if (n < 4){ quant_matrix = s->intra_matrix; - component = 0; + component = 0; }else{ quant_matrix = s->chroma_intra_matrix; - component = n - 3; + component = (n&1) + 1; } diff = decode_dc(&s->gb, component); if (diff >= 0xffff) @@ -1640,21 +1041,21 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, dc += diff; s->last_dc[component] = dc; block[0] = dc << (3 - s->intra_dc_precision); - dprintf("dc=%d\n", block[0]); + dprintf(s->avctx, "dc=%d\n", block[0]); mismatch = block[0] ^ 1; i = 0; if (s->intra_vlc_format) - rl = &rl_mpeg2; + rl = &ff_rl_mpeg2; else - rl = &rl_mpeg1; + rl = &ff_rl_mpeg1; { - OPEN_READER(re, &s->gb); - /* now quantify & encode AC coefs */ + OPEN_READER(re, &s->gb); + /* now quantify & encode AC coefficients */ for(;;) { UPDATE_CACHE(re, &s->gb); - GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2); - + GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); + if(level == 127){ break; } else if(level != 0) { @@ -1681,34 +1082,119 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return -1; } - + mismatch^= level; block[j] = level; } CLOSE_READER(re, &s->gb); } block[63]^= mismatch&1; - + s->block_last_index[n] = i; return 0; } +static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, + DCTELEM *block, + int n) +{ + int level, dc, diff, j, run; + int component; + RLTable *rl; + uint8_t * scantable= s->intra_scantable.permutated; + const uint16_t *quant_matrix; + const int qscale= s->qscale; + + /* DC coefficient */ + if (n < 4){ + quant_matrix = s->intra_matrix; + component = 0; + }else{ + quant_matrix = s->chroma_intra_matrix; + component = (n&1) + 1; + } + diff = decode_dc(&s->gb, component); + if (diff >= 0xffff) + return -1; + dc = s->last_dc[component]; + dc += diff; + s->last_dc[component] = dc; + block[0] = dc << (3 - s->intra_dc_precision); + if (s->intra_vlc_format) + rl = &ff_rl_mpeg2; + else + rl = &ff_rl_mpeg1; + + { + OPEN_READER(re, &s->gb); + /* now quantify & encode AC coefficients */ + for(;;) { + UPDATE_CACHE(re, &s->gb); + GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); + + if(level == 127){ + break; + } else if(level != 0) { + scantable += run; + j = *scantable; + level= (level*qscale*quant_matrix[j])>>4; + level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); + LAST_SKIP_BITS(re, &s->gb, 1); + } else { + /* escape */ + run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); + UPDATE_CACHE(re, &s->gb); + level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12); + scantable += run; + j = *scantable; + if(level<0){ + level= (-level*qscale*quant_matrix[j])>>4; + level= -level; + }else{ + level= (level*qscale*quant_matrix[j])>>4; + } + } + + block[j] = level; + } + CLOSE_READER(re, &s->gb); + } + + s->block_last_index[n] = scantable - s->intra_scantable.permutated; + return 0; +} + typedef struct Mpeg1Context { MpegEncContext mpeg_enc_ctx; int mpeg_enc_ctx_allocated; /* true if decoding context allocated */ int repeat_field; /* true if we must repeat the field */ AVPanScan pan_scan; /** some temporary storage for the panscan */ + int slice_count; + int 
swap_uv;//indicate VCR2 + int save_aspect_info; + int save_width, save_height; + AVRational frame_rate_ext; ///< MPEG-2 specific framerate modificator + } Mpeg1Context; -static int mpeg_decode_init(AVCodecContext *avctx) +static av_cold int mpeg_decode_init(AVCodecContext *avctx) { Mpeg1Context *s = avctx->priv_data; - + MpegEncContext *s2 = &s->mpeg_enc_ctx; + int i; + + /* we need some permutation to store matrices, + * until MPV_common_init() sets the real permutation. */ + for(i=0;i<64;i++) + s2->dsp.idct_permutation[i]=i; + + MPV_decode_defaults(s2); + s->mpeg_enc_ctx.avctx= avctx; s->mpeg_enc_ctx.flags= avctx->flags; s->mpeg_enc_ctx.flags2= avctx->flags2; - common_init(&s->mpeg_enc_ctx); - init_vlcs(); + ff_mpeg12_common_init(&s->mpeg_enc_ctx); + ff_mpeg12_init_vlcs(); s->mpeg_enc_ctx_allocated = 0; s->mpeg_enc_ctx.picture_number = 0; @@ -1717,121 +1203,207 @@ static int mpeg_decode_init(AVCodecContext *avctx) return 0; } -/* return the 8 bit start code value and update the search - state. Return -1 if no start code found */ -static int find_start_code(uint8_t **pbuf_ptr, uint8_t *buf_end) -{ - uint8_t *buf_ptr; - unsigned int state=0xFFFFFFFF, v; - int val; +static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm, + const uint8_t *new_perm){ + uint16_t temp_matrix[64]; + int i; - buf_ptr = *pbuf_ptr; - while (buf_ptr < buf_end) { - v = *buf_ptr++; - if (state == 0x000001) { - state = ((state << 8) | v) & 0xffffff; - val = state; - goto found; - } - state = ((state << 8) | v) & 0xffffff; + memcpy(temp_matrix,matrix,64*sizeof(uint16_t)); + + for(i=0;i<64;i++){ + matrix[new_perm[i]] = temp_matrix[old_perm[i]]; } - val = -1; - found: - *pbuf_ptr = buf_ptr; - return val; } -static int mpeg1_decode_picture(AVCodecContext *avctx, - uint8_t *buf, int buf_size) +/* Call this function when we know all parameters. + * It may be called in different places for MPEG-1 and MPEG-2. */ +static int mpeg_decode_postinit(AVCodecContext *avctx){ + Mpeg1Context *s1 = avctx->priv_data; + MpegEncContext *s = &s1->mpeg_enc_ctx; + uint8_t old_permutation[64]; + + if ( + (s1->mpeg_enc_ctx_allocated == 0)|| + avctx->coded_width != s->width || + avctx->coded_height != s->height|| + s1->save_width != s->width || + s1->save_height != s->height || + s1->save_aspect_info != s->aspect_ratio_info|| + 0) + { + + if (s1->mpeg_enc_ctx_allocated) { + ParseContext pc= s->parse_context; + s->parse_context.buffer=0; + MPV_common_end(s); + s->parse_context= pc; + } + + if( (s->width == 0 )||(s->height == 0)) + return -2; + + avcodec_set_dimensions(avctx, s->width, s->height); + avctx->bit_rate = s->bit_rate; + s1->save_aspect_info = s->aspect_ratio_info; + s1->save_width = s->width; + s1->save_height = s->height; + + /* low_delay may be forced, in this case we will have B-frames + * that behave like P-frames. 
*/ + avctx->has_b_frames = !(s->low_delay); + + if(avctx->sub_id==1){//s->codec_id==avctx->codec_id==CODEC_ID + //MPEG-1 fps + avctx->time_base.den= ff_frame_rate_tab[s->frame_rate_index].num; + avctx->time_base.num= ff_frame_rate_tab[s->frame_rate_index].den; + //MPEG-1 aspect + avctx->sample_aspect_ratio= av_d2q( + 1.0/ff_mpeg1_aspect[s->aspect_ratio_info], 255); + + }else{//MPEG-2 + //MPEG-2 fps + av_reduce( + &s->avctx->time_base.den, + &s->avctx->time_base.num, + ff_frame_rate_tab[s->frame_rate_index].num * s1->frame_rate_ext.num, + ff_frame_rate_tab[s->frame_rate_index].den * s1->frame_rate_ext.den, + 1<<30); + //MPEG-2 aspect + if(s->aspect_ratio_info > 1){ + if( (s1->pan_scan.width == 0 )||(s1->pan_scan.height == 0) ){ + s->avctx->sample_aspect_ratio= + av_div_q( + ff_mpeg2_aspect[s->aspect_ratio_info], + (AVRational){s->width, s->height} + ); + }else{ + s->avctx->sample_aspect_ratio= + av_div_q( + ff_mpeg2_aspect[s->aspect_ratio_info], + (AVRational){s1->pan_scan.width, s1->pan_scan.height} + ); + } + }else{ + s->avctx->sample_aspect_ratio= + ff_mpeg2_aspect[s->aspect_ratio_info]; + } + }//MPEG-2 + + if(avctx->xvmc_acceleration){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_xvmc_mpg2_420); + }else{ + if(s->chroma_format < 2){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_420); + }else + if(s->chroma_format == 2){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_422); + }else + if(s->chroma_format > 2){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_444); + } + } + //until then pix_fmt may be changed right after codec init + if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ) + if( avctx->idct_algo == FF_IDCT_AUTO ) + avctx->idct_algo = FF_IDCT_SIMPLE; + + /* Quantization matrices may need reordering + * if DCT permutation is changed. 
*/ + memcpy(old_permutation,s->dsp.idct_permutation,64*sizeof(uint8_t)); + + if (MPV_common_init(s) < 0) + return -2; + + quant_matrix_rebuild(s->intra_matrix, old_permutation,s->dsp.idct_permutation); + quant_matrix_rebuild(s->inter_matrix, old_permutation,s->dsp.idct_permutation); + quant_matrix_rebuild(s->chroma_intra_matrix,old_permutation,s->dsp.idct_permutation); + quant_matrix_rebuild(s->chroma_inter_matrix,old_permutation,s->dsp.idct_permutation); + + s1->mpeg_enc_ctx_allocated = 1; + } + return 0; +} + +static int mpeg1_decode_picture(AVCodecContext *avctx, + const uint8_t *buf, int buf_size) { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; int ref, f_code, vbv_delay; + if(mpeg_decode_postinit(s->avctx) < 0) + return -2; + init_get_bits(&s->gb, buf, buf_size*8); ref = get_bits(&s->gb, 10); /* temporal ref */ s->pict_type = get_bits(&s->gb, 3); + if(s->pict_type == 0 || s->pict_type > 3) + return -1; vbv_delay= get_bits(&s->gb, 16); - if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) { + if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) { s->full_pel[0] = get_bits1(&s->gb); f_code = get_bits(&s->gb, 3); - if (f_code == 0) + if (f_code == 0 && avctx->error_resilience >= FF_ER_COMPLIANT) return -1; s->mpeg_f_code[0][0] = f_code; s->mpeg_f_code[0][1] = f_code; } - if (s->pict_type == B_TYPE) { + if (s->pict_type == FF_B_TYPE) { s->full_pel[1] = get_bits1(&s->gb); f_code = get_bits(&s->gb, 3); - if (f_code == 0) + if (f_code == 0 && avctx->error_resilience >= FF_ER_COMPLIANT) return -1; s->mpeg_f_code[1][0] = f_code; s->mpeg_f_code[1][1] = f_code; } s->current_picture.pict_type= s->pict_type; - s->current_picture.key_frame= s->pict_type == I_TYPE; - -// if(avctx->debug & FF_DEBUG_PICT_INFO) -// av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d\n", vbv_delay, ref); - + s->current_picture.key_frame= s->pict_type == FF_I_TYPE; + + if(avctx->debug & FF_DEBUG_PICT_INFO) + av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type); + s->y_dc_scale = 8; s->c_dc_scale = 8; s->first_slice = 1; return 0; } -static void mpeg_decode_sequence_extension(MpegEncContext *s) +static void mpeg_decode_sequence_extension(Mpeg1Context *s1) { + MpegEncContext *s= &s1->mpeg_enc_ctx; int horiz_size_ext, vert_size_ext; int bit_rate_ext; - int frame_rate_ext_n, frame_rate_ext_d; - int level, profile; - skip_bits(&s->gb, 1); /* profil and level esc*/ - profile= get_bits(&s->gb, 3); - level= get_bits(&s->gb, 4); + skip_bits(&s->gb, 1); /* profile and level esc*/ + s->avctx->profile= get_bits(&s->gb, 3); + s->avctx->level= get_bits(&s->gb, 4); s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */ - skip_bits(&s->gb, 2); /* chroma_format */ + s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */ horiz_size_ext = get_bits(&s->gb, 2); vert_size_ext = get_bits(&s->gb, 2); s->width |= (horiz_size_ext << 12); s->height |= (vert_size_ext << 12); bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */ - s->bit_rate = ((s->bit_rate / 400) | (bit_rate_ext << 12)) * 400; + s->bit_rate += (bit_rate_ext << 18) * 400; skip_bits1(&s->gb); /* marker */ s->avctx->rc_buffer_size += get_bits(&s->gb, 8)*1024*16<<10; s->low_delay = get_bits1(&s->gb); if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1; - frame_rate_ext_n = get_bits(&s->gb, 2); - frame_rate_ext_d = get_bits(&s->gb, 5); - av_reduce( - &s->avctx->frame_rate, - &s->avctx->frame_rate_base, - frame_rate_tab[s->frame_rate_index] * (frame_rate_ext_n+1), - 
MPEG1_FRAME_RATE_BASE * (frame_rate_ext_d+1), - 1<<30); + s1->frame_rate_ext.num = get_bits(&s->gb, 2)+1; + s1->frame_rate_ext.den = get_bits(&s->gb, 5)+1; - dprintf("sequence extension\n"); + dprintf(s->avctx, "sequence extension\n"); s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG2VIDEO; - s->avctx->sub_id = 2; /* indicates mpeg2 found */ + s->avctx->sub_id = 2; /* indicates MPEG-2 found */ - if(s->aspect_ratio_info <= 1) - s->avctx->sample_aspect_ratio= mpeg2_aspect[s->aspect_ratio_info]; - else{ - s->avctx->sample_aspect_ratio= - av_div_q( - mpeg2_aspect[s->aspect_ratio_info], - (AVRational){s->width, s->height} - ); - } - if(s->avctx->debug & FF_DEBUG_PICT_INFO) - av_log(s->avctx, AV_LOG_DEBUG, "profile: %d, level: %d vbv buffer: %d, bitrate:%d\n", - profile, level, s->avctx->rc_buffer_size, s->bit_rate); + av_log(s->avctx, AV_LOG_DEBUG, "profile: %d, level: %d vbv buffer: %d, bitrate:%d\n", + s->avctx->profile, s->avctx->level, s->avctx->rc_buffer_size, s->bit_rate); + } static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1) @@ -1850,17 +1422,10 @@ static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1) skip_bits(&s->gb, 1); //marker h= get_bits(&s->gb, 14); skip_bits(&s->gb, 1); //marker - + s1->pan_scan.width= 16*w; s1->pan_scan.height=16*h; - if(s->aspect_ratio_info > 1) - s->avctx->sample_aspect_ratio= - av_div_q( - mpeg2_aspect[s->aspect_ratio_info], - (AVRational){w, h} - ); - if(s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h); } @@ -1868,19 +1433,33 @@ static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1) static void mpeg_decode_picture_display_extension(Mpeg1Context *s1) { MpegEncContext *s= &s1->mpeg_enc_ctx; - int i; + int i,nofco; - for(i=0; i<1; i++){ //FIXME count + nofco = 1; + if(s->progressive_sequence){ + if(s->repeat_first_field){ + nofco++; + if(s->top_field_first) + nofco++; + } + }else{ + if(s->picture_structure == PICT_FRAME){ + nofco++; + if(s->repeat_first_field) + nofco++; + } + } + for(i=0; ipan_scan.position[i][0]= get_sbits(&s->gb, 16); skip_bits(&s->gb, 1); //marker s1->pan_scan.position[i][1]= get_sbits(&s->gb, 16); skip_bits(&s->gb, 1); //marker } - + if(s->avctx->debug & FF_DEBUG_PICT_INFO) - av_log(s->avctx, AV_LOG_DEBUG, "pde (%d,%d) (%d,%d) (%d,%d)\n", - s1->pan_scan.position[0][0], s1->pan_scan.position[0][1], - s1->pan_scan.position[1][0], s1->pan_scan.position[1][1], + av_log(s->avctx, AV_LOG_DEBUG, "pde (%d,%d) (%d,%d) (%d,%d)\n", + s1->pan_scan.position[0][0], s1->pan_scan.position[0][1], + s1->pan_scan.position[1][0], s1->pan_scan.position[1][1], s1->pan_scan.position[2][0], s1->pan_scan.position[2][1] ); } @@ -1889,7 +1468,7 @@ static void mpeg_decode_quant_matrix_extension(MpegEncContext *s) { int i, v, j; - dprintf("matrix extension\n"); + dprintf(s->avctx, "matrix extension\n"); if (get_bits1(&s->gb)) { for(i=0;i<64;i++) { @@ -1942,13 +1521,15 @@ static void mpeg_decode_picture_coding_extension(MpegEncContext *s) s->chroma_420_type = get_bits1(&s->gb); s->progressive_frame = get_bits1(&s->gb); - if(s->picture_structure == PICT_FRAME) + if(s->picture_structure == PICT_FRAME){ s->first_field=0; - else{ + s->v_edge_pos= 16*s->mb_height; + }else{ s->first_field ^= 1; + s->v_edge_pos= 8*s->mb_height; memset(s->mbskip_table, 0, s->mb_stride*s->mb_height); } - + if(s->alternate_scan){ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , 
ff_alternate_vertical_scan); @@ -1956,32 +1537,32 @@ static void mpeg_decode_picture_coding_extension(MpegEncContext *s) ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct); } - + /* composite display not parsed */ - dprintf("intra_dc_precision=%d\n", s->intra_dc_precision); - dprintf("picture_structure=%d\n", s->picture_structure); - dprintf("top field first=%d\n", s->top_field_first); - dprintf("repeat first field=%d\n", s->repeat_first_field); - dprintf("conceal=%d\n", s->concealment_motion_vectors); - dprintf("intra_vlc_format=%d\n", s->intra_vlc_format); - dprintf("alternate_scan=%d\n", s->alternate_scan); - dprintf("frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct); - dprintf("progressive_frame=%d\n", s->progressive_frame); + dprintf(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision); + dprintf(s->avctx, "picture_structure=%d\n", s->picture_structure); + dprintf(s->avctx, "top field first=%d\n", s->top_field_first); + dprintf(s->avctx, "repeat first field=%d\n", s->repeat_first_field); + dprintf(s->avctx, "conceal=%d\n", s->concealment_motion_vectors); + dprintf(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format); + dprintf(s->avctx, "alternate_scan=%d\n", s->alternate_scan); + dprintf(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct); + dprintf(s->avctx, "progressive_frame=%d\n", s->progressive_frame); } -static void mpeg_decode_extension(AVCodecContext *avctx, - uint8_t *buf, int buf_size) +static void mpeg_decode_extension(AVCodecContext *avctx, + const uint8_t *buf, int buf_size) { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; int ext_type; init_get_bits(&s->gb, buf, buf_size*8); - + ext_type = get_bits(&s->gb, 4); switch(ext_type) { case 0x1: - mpeg_decode_sequence_extension(s); + mpeg_decode_sequence_extension(s1); break; case 0x2: mpeg_decode_sequence_display_extension(s1); @@ -1999,50 +1580,19 @@ static void mpeg_decode_extension(AVCodecContext *avctx, } static void exchange_uv(MpegEncContext *s){ -short * tmp; - - tmp = s->pblocks[4]; + short * tmp = s->pblocks[4]; s->pblocks[4] = s->pblocks[5]; s->pblocks[5] = tmp; } -#define DECODE_SLICE_FATAL_ERROR -2 -#define DECODE_SLICE_ERROR -1 -#define DECODE_SLICE_OK 0 +static int mpeg_field_start(MpegEncContext *s){ + AVCodecContext *avctx= s->avctx; + Mpeg1Context *s1 = (Mpeg1Context*)s; -/** - * decodes a slice. - * @return DECODE_SLICE_FATAL_ERROR if a non recoverable error occured
- *         DECODE_SLICE_ERROR if the slice is damaged
- *         DECODE_SLICE_OK if this slice is ok
- */ -static int mpeg_decode_slice(AVCodecContext *avctx, - AVFrame *pict, - int start_code, - uint8_t **buf, int buf_size) -{ - Mpeg1Context *s1 = avctx->priv_data; - MpegEncContext *s = &s1->mpeg_enc_ctx; - int ret; - const int field_pic= s->picture_structure != PICT_FRAME; - - s->resync_mb_x= s->mb_x = - s->resync_mb_y= s->mb_y = -1; - - start_code = (start_code - 1) & 0xff; - if (start_code >= s->mb_height){ - av_log(s->avctx, AV_LOG_ERROR, "slice below image (%d >= %d)\n", start_code, s->mb_height); - return -1; - } - - ff_mpeg1_clean_buffers(s); - s->interlaced_dct = 0; - /* start frame decoding */ - if (s->first_slice) { - if(s->first_field || s->picture_structure==PICT_FRAME){ + if(s->first_field || s->picture_structure==PICT_FRAME){ if(MPV_frame_start(s, avctx) < 0) - return DECODE_SLICE_FATAL_ERROR; + return -1; ff_er_frame_start(s); @@ -2057,57 +1607,75 @@ static int mpeg_decode_slice(AVCodecContext *avctx, } else if (s->progressive_frame) { s->current_picture_ptr->repeat_pict = 1; } - } + } *s->current_picture_ptr->pan_scan= s1->pan_scan; - }else{ //second field + }else{ //second field int i; - + if(!s->current_picture_ptr){ av_log(s->avctx, AV_LOG_ERROR, "first field missing\n"); return -1; } - + for(i=0; i<4; i++){ s->current_picture.data[i] = s->current_picture_ptr->data[i]; if(s->picture_structure == PICT_BOTTOM_FIELD){ s->current_picture.data[i] += s->current_picture_ptr->linesize[i]; - } + } } - } + } #ifdef HAVE_XVMC // MPV_frame_start will call this function too, // but we need to call it on every field - if(s->avctx->xvmc_acceleration) + if(s->avctx->xvmc_acceleration) XVMC_field_start(s,avctx); #endif - }//fi(s->first_slice) + + return 0; +} + +#define DECODE_SLICE_ERROR -1 +#define DECODE_SLICE_OK 0 + +/** + * decodes a slice. MpegEncContext.mb_y must be set to the MB row from the startcode + * @return DECODE_SLICE_ERROR if the slice is damaged
+ * DECODE_SLICE_OK if this slice is ok
+ */ +static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y, + const uint8_t **buf, int buf_size) +{ + MpegEncContext *s = &s1->mpeg_enc_ctx; + AVCodecContext *avctx= s->avctx; + const int field_pic= s->picture_structure != PICT_FRAME; + const int lowres= s->avctx->lowres; + + s->resync_mb_x= + s->resync_mb_y= -1; + + if (mb_y<= s->mb_height){ + av_log(s->avctx, AV_LOG_ERROR, "slice below image (%d >= %d)\n", mb_y, s->mb_height); + return -1; + } init_get_bits(&s->gb, *buf, buf_size*8); - s->qscale = get_qscale(s); - if (s->first_slice && (s->first_field || s->picture_structure==PICT_FRAME)) { - if(s->avctx->debug&FF_DEBUG_PICT_INFO){ - av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n", - s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1], - s->pict_type == I_TYPE ? "I" : (s->pict_type == P_TYPE ? "P" : (s->pict_type == B_TYPE ? "B" : "S")), - s->progressive_sequence ? "ps" :"", s->progressive_frame ? "pf" : "", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"", - s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors, - s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :""); - } - } + ff_mpeg1_clean_buffers(s); + s->interlaced_dct = 0; + + s->qscale = get_qscale(s); - s->first_slice = 0; if(s->qscale == 0){ av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n"); return -1; } - + /* extra slice info */ while (get_bits1(&s->gb) != 0) { skip_bits(&s->gb, 8); } - + s->mb_x=0; for(;;) { @@ -2126,108 +1694,102 @@ static int mpeg_decode_slice(AVCodecContext *avctx, break; } } - + if(s->mb_x >= (unsigned)s->mb_width){ + av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n"); + return -1; + } + s->resync_mb_x= s->mb_x; - s->resync_mb_y= s->mb_y = start_code; + s->resync_mb_y= s->mb_y= mb_y; s->mb_skip_run= 0; ff_init_block_index(s); + if (s->mb_y==0 && s->mb_x==0 && (s->first_field || s->picture_structure==PICT_FRAME)) { + if(s->avctx->debug&FF_DEBUG_PICT_INFO){ + av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n", + s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1], + s->pict_type == FF_I_TYPE ? "I" : (s->pict_type == FF_P_TYPE ? "P" : (s->pict_type == FF_B_TYPE ? "B" : "S")), + s->progressive_sequence ? "ps" :"", s->progressive_frame ? "pf" : "", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"", + s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors, + s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :""); + } + } + for(;;) { #ifdef HAVE_XVMC - //one 1 we memcpy blocks in xvmcvideo + //If 1, we memcpy blocks in xvmcvideo. if(s->avctx->xvmc_acceleration > 1) XVMC_init_block(s);//set s->block #endif - s->dsp.clear_blocks(s->block[0]); - - ret = mpeg_decode_mb(s, s->block); - s->chroma_qscale= s->qscale; - - dprintf("ret=%d\n", ret); - if (ret < 0) + if(mpeg_decode_mb(s, s->block) < 0) return -1; if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs - const int wrap = field_pic ? 
2*s->block_wrap[0] : s->block_wrap[0]; - int xy = s->mb_x*2 + 1 + (s->mb_y*2 +1)*wrap; - int motion_for_top_x, motion_for_top_y, motion_back_top_x, motion_back_top_y; - int motion_for_bottom_x, motion_for_bottom_y, motion_back_bottom_x, motion_back_bottom_y; + const int wrap = field_pic ? 2*s->b8_stride : s->b8_stride; + int xy = s->mb_x*2 + s->mb_y*2*wrap; + int motion_x, motion_y, dir, i; if(field_pic && !s->first_field) xy += wrap/2; - if (s->mb_intra) { - motion_for_top_x = motion_for_top_y = motion_back_top_x = motion_back_top_y = - motion_for_bottom_x = motion_for_bottom_y = motion_back_bottom_x = motion_back_bottom_y = 0; - }else if (s->mv_type == MV_TYPE_16X16){ - motion_for_top_x = motion_for_bottom_x = s->mv[0][0][0]; - motion_for_top_y = motion_for_bottom_y = s->mv[0][0][1]; - motion_back_top_x = motion_back_bottom_x = s->mv[1][0][0]; - motion_back_top_y = motion_back_bottom_y = s->mv[1][0][1]; - } else /*if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8))*/ { - motion_for_top_x = s->mv[0][0][0]; - motion_for_top_y = s->mv[0][0][1]; - motion_for_bottom_x = s->mv[0][1][0]; - motion_for_bottom_y = s->mv[0][1][1]; - motion_back_top_x = s->mv[1][0][0]; - motion_back_top_y = s->mv[1][0][1]; - motion_back_bottom_x = s->mv[1][1][0]; - motion_back_bottom_y = s->mv[1][1][1]; + for(i=0; i<2; i++){ + for(dir=0; dir<2; dir++){ + if (s->mb_intra || (dir==1 && s->pict_type != FF_B_TYPE)) { + motion_x = motion_y = 0; + }else if (s->mv_type == MV_TYPE_16X16 || (s->mv_type == MV_TYPE_FIELD && field_pic)){ + motion_x = s->mv[dir][0][0]; + motion_y = s->mv[dir][0][1]; + } else /*if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8))*/ { + motion_x = s->mv[dir][i][0]; + motion_y = s->mv[dir][i][1]; + } + + s->current_picture.motion_val[dir][xy ][0] = motion_x; + s->current_picture.motion_val[dir][xy ][1] = motion_y; + s->current_picture.motion_val[dir][xy + 1][0] = motion_x; + s->current_picture.motion_val[dir][xy + 1][1] = motion_y; + s->current_picture.ref_index [dir][xy ]= + s->current_picture.ref_index [dir][xy + 1]= s->field_select[dir][i]; + assert(s->field_select[dir][i]==0 || s->field_select[dir][i]==1); + } + xy += wrap; } - - s->current_picture.motion_val[0][xy][0] = motion_for_top_x; - s->current_picture.motion_val[0][xy][1] = motion_for_top_y; - s->current_picture.motion_val[0][xy + 1][0] = motion_for_top_x; - s->current_picture.motion_val[0][xy + 1][1] = motion_for_top_y; - s->current_picture.motion_val[0][xy + wrap][0] = motion_for_bottom_x; - s->current_picture.motion_val[0][xy + wrap][1] = motion_for_bottom_y; - s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_for_bottom_x; - s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_for_bottom_y; - - if(s->pict_type != B_TYPE){ - motion_back_top_x = motion_back_top_y = motion_back_bottom_x = motion_back_bottom_y = 0; - } - - s->current_picture.motion_val[1][xy][0] = motion_back_top_x; - s->current_picture.motion_val[1][xy][1] = motion_back_top_y; - s->current_picture.motion_val[1][xy + 1][0] = motion_back_top_x; - s->current_picture.motion_val[1][xy + 1][1] = motion_back_top_y; - s->current_picture.motion_val[1][xy + wrap][0] = motion_back_bottom_x; - s->current_picture.motion_val[1][xy + wrap][1] = motion_back_bottom_y; - s->current_picture.motion_val[1][xy + 1 + wrap][0] = motion_back_bottom_x; - s->current_picture.motion_val[1][xy + 1 + wrap][1] = motion_back_bottom_y; } - s->dest[0] += 16; - s->dest[1] += 8; - s->dest[2] += 8; + s->dest[0] += 16 >> lowres; + s->dest[1] +=(16 >> lowres) >> 
s->chroma_x_shift; + s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift; MPV_decode_mb(s, s->block); - - if (++s->mb_x >= s->mb_width) { - ff_draw_horiz_band(s, 16*s->mb_y, 16); + if (++s->mb_x >= s->mb_width) { + const int mb_size= 16>>s->avctx->lowres; + + ff_draw_horiz_band(s, mb_size*s->mb_y, mb_size); s->mb_x = 0; s->mb_y++; if(s->mb_y<= s->mb_height){ int left= s->gb.size_in_bits - get_bits_count(&s->gb); + int is_d10= s->chroma_format==2 && s->pict_type==FF_I_TYPE && avctx->profile==0 && avctx->level==5 + && s->intra_dc_precision == 2 && s->q_scale_type == 1 && s->alternate_scan == 0 + && s->progressive_frame == 0 /* vbv_delay == 0xBBB || 0xE10*/; - if(left < 0 || (left && show_bits(&s->gb, FFMIN(left, 23))) + if(left < 0 || (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) || (avctx->error_resilience >= FF_ER_AGGRESSIVE && left>8)){ - av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d\n", left); + av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X\n", left, show_bits(&s->gb, FFMIN(left, 23))); return -1; }else goto eos; } - + ff_init_block_index(s); } /* skip mb handling */ if (s->mb_skip_run == -1) { - /* read again increment */ + /* read increment again */ s->mb_skip_run = 0; for(;;) { int code = get_vlc2(&s->gb, mbincr_vlc.table, MBINCR_VLC_BITS, 2); @@ -2251,23 +1813,88 @@ static int mpeg_decode_slice(AVCodecContext *avctx, break; } } + if(s->mb_skip_run){ + int i; + if(s->pict_type == FF_I_TYPE){ + av_log(s->avctx, AV_LOG_ERROR, "skipped MB in I frame at %d %d\n", s->mb_x, s->mb_y); + return -1; + } + + /* skip mb */ + s->mb_intra = 0; + for(i=0;i<12;i++) + s->block_last_index[i] = -1; + if(s->picture_structure == PICT_FRAME) + s->mv_type = MV_TYPE_16X16; + else + s->mv_type = MV_TYPE_FIELD; + if (s->pict_type == FF_P_TYPE) { + /* if P type, zero motion vector is implied */ + s->mv_dir = MV_DIR_FORWARD; + s->mv[0][0][0] = s->mv[0][0][1] = 0; + s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0; + s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0; + s->field_select[0][0]= s->picture_structure - 1; + } else { + /* if B type, reuse previous vectors and directions */ + s->mv[0][0][0] = s->last_mv[0][0][0]; + s->mv[0][0][1] = s->last_mv[0][0][1]; + s->mv[1][0][0] = s->last_mv[1][0][0]; + s->mv[1][0][1] = s->last_mv[1][0][1]; + } + } } } eos: // end of slice - *buf += get_bits_count(&s->gb)/8 - 1; + *buf += (get_bits_count(&s->gb)-1)/8; //printf("y %d %d %d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y); return 0; } +static int slice_decode_thread(AVCodecContext *c, void *arg){ + MpegEncContext *s= arg; + const uint8_t *buf= s->gb.buffer; + int mb_y= s->start_mb_y; + + s->error_count= 3*(s->end_mb_y - s->start_mb_y)*s->mb_width; + + for(;;){ + uint32_t start_code; + int ret; + + ret= mpeg_decode_slice((Mpeg1Context*)s, mb_y, &buf, s->gb.buffer_end - buf); + emms_c(); +//av_log(c, AV_LOG_DEBUG, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n", +//ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, s->start_mb_y, s->end_mb_y, s->error_count); + if(ret < 0){ + if(s->resync_mb_x>=0 && s->resync_mb_y>=0) + ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, AC_ERROR|DC_ERROR|MV_ERROR); + }else{ + ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END); + } + + if(s->mb_y == s->end_mb_y) + return 0; + + start_code= -1; + buf = ff_find_start_code(buf, s->gb.buffer_end, &start_code); + mb_y= start_code - SLICE_MIN_START_CODE; + if(mb_y < 0 || mb_y >= s->end_mb_y) + return -1; + } + + return 0; //not reached +} + /** - * handles 
slice ends. - * @return 1 if it seems to be the last slice of + * Handles slice ends. + * @return 1 if it seems to be the last slice */ static int slice_end(AVCodecContext *avctx, AVFrame *pict) { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; - + if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr) return 0; @@ -2285,12 +1912,12 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict) MPV_frame_end(s); - if (s->pict_type == B_TYPE || s->low_delay) { + if (s->pict_type == FF_B_TYPE || s->low_delay) { *pict= *(AVFrame*)s->current_picture_ptr; ff_print_debug_info(s, pict); } else { s->picture_number++; - /* latency of 1 frame for I and P frames */ + /* latency of 1 frame for I- and P-frames */ /* XXX: use another variable than picture_number */ if (s->last_picture_ptr != NULL) { *pict= *(AVFrame*)s->last_picture_ptr; @@ -2304,65 +1931,34 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict) } } -static int mpeg1_decode_sequence(AVCodecContext *avctx, - uint8_t *buf, int buf_size) +static int mpeg1_decode_sequence(AVCodecContext *avctx, + const uint8_t *buf, int buf_size) { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; - int width, height, i, v, j; - float aspect; + int width,height; + int i, v, j; init_get_bits(&s->gb, buf, buf_size*8); width = get_bits(&s->gb, 12); height = get_bits(&s->gb, 12); - s->aspect_ratio_info= get_bits(&s->gb, 4); - if (s->aspect_ratio_info == 0) + if (width <= 0 || height <= 0) return -1; - aspect= 1.0/mpeg1_aspect[s->aspect_ratio_info]; - avctx->sample_aspect_ratio= av_d2q(aspect, 255); - + s->aspect_ratio_info= get_bits(&s->gb, 4); + if (s->aspect_ratio_info == 0) { + av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n"); + if (avctx->error_resilience >= FF_ER_COMPLIANT) + return -1; + } s->frame_rate_index = get_bits(&s->gb, 4); - if (s->frame_rate_index == 0) + if (s->frame_rate_index == 0 || s->frame_rate_index > 13) return -1; s->bit_rate = get_bits(&s->gb, 18) * 400; if (get_bits1(&s->gb) == 0) /* marker */ return -1; - if (width <= 0 || height <= 0 || - (width % 2) != 0 || (height % 2) != 0) - return -1; - if (width != s->width || - height != s->height) { - /* start new mpeg1 context decoding */ - s->out_format = FMT_MPEG1; - if (s1->mpeg_enc_ctx_allocated) { - MPV_common_end(s); - } - s->width = width; - s->height = height; - avctx->has_b_frames= 1; - avctx->width = width; - avctx->height = height; - av_reduce( - &avctx->frame_rate, - &avctx->frame_rate_base, - frame_rate_tab[s->frame_rate_index], - MPEG1_FRAME_RATE_BASE, //FIXME store in allready reduced form - 1<<30 - ); - avctx->bit_rate = s->bit_rate; - - //get_format() or set_video(width,height,aspect,pix_fmt); - //until then pix_fmt may be changed right after codec init - if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ) - if( avctx->idct_algo == FF_IDCT_AUTO ) - avctx->idct_algo = FF_IDCT_SIMPLE; - - if (MPV_common_init(s) < 0) - return -1; - s1->mpeg_enc_ctx_allocated = 1; - s->swap_uv = 0;//just in case vcr2 and mpeg2 stream have been concatinated - } + s->width = width; + s->height = height; s->avctx->rc_buffer_size= get_bits(&s->gb, 10) * 1024*16; skip_bits(&s->gb, 1); @@ -2371,19 +1967,23 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, if (get_bits1(&s->gb)) { for(i=0;i<64;i++) { v = get_bits(&s->gb, 8); - j = s->intra_scantable.permutated[i]; + if(v==0){ + av_log(s->avctx, AV_LOG_ERROR, "intra matrix damaged\n"); + return -1; + } + j = s->dsp.idct_permutation[ ff_zigzag_direct[i] ]; 
s->intra_matrix[j] = v; s->chroma_intra_matrix[j] = v; } #ifdef DEBUG - dprintf("intra matrix present\n"); + dprintf(s->avctx, "intra matrix present\n"); for(i=0;i<64;i++) - dprintf(" %d", s->intra_matrix[s->intra_scantable.permutated[i]]); - printf("\n"); + dprintf(s->avctx, " %d", s->intra_matrix[s->dsp.idct_permutation[i]]); + dprintf(s->avctx, "\n"); #endif } else { for(i=0;i<64;i++) { - int j= s->dsp.idct_permutation[i]; + j = s->dsp.idct_permutation[i]; v = ff_mpeg1_default_intra_matrix[i]; s->intra_matrix[j] = v; s->chroma_intra_matrix[j] = v; @@ -2392,15 +1992,19 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, if (get_bits1(&s->gb)) { for(i=0;i<64;i++) { v = get_bits(&s->gb, 8); - j = s->intra_scantable.permutated[i]; + if(v==0){ + av_log(s->avctx, AV_LOG_ERROR, "inter matrix damaged\n"); + return -1; + } + j = s->dsp.idct_permutation[ ff_zigzag_direct[i] ]; s->inter_matrix[j] = v; s->chroma_inter_matrix[j] = v; } #ifdef DEBUG - dprintf("non intra matrix present\n"); + dprintf(s->avctx, "non-intra matrix present\n"); for(i=0;i<64;i++) - dprintf(" %d", s->inter_matrix[s->intra_scantable.permutated[i]]); - printf("\n"); + dprintf(s->avctx, " %d", s->inter_matrix[s->dsp.idct_permutation[i]]); + dprintf(s->avctx, "\n"); #endif } else { for(i=0;i<64;i++) { @@ -2411,19 +2015,27 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, } } - /* we set mpeg2 parameters so that it emulates mpeg1 */ + if(show_bits(&s->gb, 23) != 0){ + av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n"); + return -1; + } + + /* we set MPEG-2 parameters so that it emulates MPEG-1 */ s->progressive_sequence = 1; s->progressive_frame = 1; s->picture_structure = PICT_FRAME; s->frame_pred_frame_dct = 1; + s->chroma_format = 1; s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG1VIDEO; - avctx->sub_id = 1; /* indicates mpeg1 */ + avctx->sub_id = 1; /* indicates MPEG-1 */ + s->out_format = FMT_MPEG1; + s->swap_uv = 0;//AFAIK VCR2 does not have SEQ_HEADER if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1; - + if(s->avctx->debug & FF_DEBUG_PICT_INFO) - av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%d\n", + av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%d\n", s->avctx->rc_buffer_size, s->bit_rate); - + return 0; } @@ -2433,26 +2045,30 @@ static int vcr2_init_sequence(AVCodecContext *avctx) MpegEncContext *s = &s1->mpeg_enc_ctx; int i, v; - /* start new mpeg1 context decoding */ + /* start new MPEG-1 context decoding */ s->out_format = FMT_MPEG1; if (s1->mpeg_enc_ctx_allocated) { MPV_common_end(s); } - s->width = avctx->width; - s->height = avctx->height; + s->width = avctx->coded_width; + s->height = avctx->coded_height; avctx->has_b_frames= 0; //true? 
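
The sequence header fields consumed by mpeg1_decode_sequence() above sit at fixed bit offsets: 12-bit width, 12-bit height, 4-bit aspect_ratio_info, 4-bit frame_rate_index, an 18-bit bit rate in units of 400 bit/s, a marker bit, and a 10-bit VBV buffer size in units of 16*1024 bits. A standalone sketch of that layout (not part of the patch; the tiny bit reader is an invented stand-in for GetBitContext, and the sample bytes are hand-made for illustration):

#include <stdint.h>
#include <stdio.h>

struct bit_reader { const uint8_t *buf; unsigned pos; };

/* Read n bits MSB-first, as the MPEG bitstream is laid out. */
static unsigned take_bits(struct bit_reader *br, unsigned n)
{
    unsigned v = 0;
    while (n--) {
        v = (v << 1) | ((br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1u);
        br->pos++;
    }
    return v;
}

int main(void)
{
    /* Hand-built example: 720x576, aspect_ratio_info 1, frame_rate_index 3
     * (25 fps in ff_frame_rate_tab), bit_rate_value 25000 (10 Mbit/s),
     * marker bit 1, vbv_buffer_size_value 112. */
    static const uint8_t seq_hdr[8] = { 0x2D, 0x02, 0x40, 0x13,
                                        0x18, 0x6A, 0x23, 0x80 };
    struct bit_reader br = { seq_hdr, 0 };

    unsigned width           = take_bits(&br, 12);
    unsigned height          = take_bits(&br, 12);
    unsigned aspect_info     = take_bits(&br, 4);
    unsigned frame_rate_idx  = take_bits(&br, 4);
    unsigned bit_rate_value  = take_bits(&br, 18);  /* units of 400 bit/s   */
    unsigned marker          = take_bits(&br, 1);   /* always 1             */
    unsigned vbv_buffer_size = take_bits(&br, 10);  /* units of 16*1024 bit */

    printf("%ux%u aspect=%u frame_rate_index=%u bit_rate=%u bit/s "
           "vbv=%u bits marker=%u\n",
           width, height, aspect_info, frame_rate_idx,
           bit_rate_value * 400, vbv_buffer_size * 16 * 1024, marker);
    return 0;
}
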
s->low_delay= 1; - //get_format() or set_video(width,height,aspect,pix_fmt); - //until then pix_fmt may be changed right after codec init + if(avctx->xvmc_acceleration){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_xvmc_mpg2_420); + }else{ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_420); + } + if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ) if( avctx->idct_algo == FF_IDCT_AUTO ) avctx->idct_algo = FF_IDCT_SIMPLE; - + if (MPV_common_init(s) < 0) return -1; exchange_uv(s);//common init reset pblocks, so we swap them here - s->swap_uv = 1;// in case of xvmc we need to swap uv for each MB + s->swap_uv = 1;// in case of xvmc we need to swap uv for each MB s1->mpeg_enc_ctx_allocated = 1; for(i=0;i<64;i++) { @@ -2470,13 +2086,14 @@ static int vcr2_init_sequence(AVCodecContext *avctx) s->progressive_frame = 1; s->picture_structure = PICT_FRAME; s->frame_pred_frame_dct = 1; + s->chroma_format = 1; s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG2VIDEO; - avctx->sub_id = 2; /* indicates mpeg2 */ + avctx->sub_id = 2; /* indicates MPEG-2 */ return 0; } -static void mpeg_decode_user_data(AVCodecContext *avctx, +static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *buf, int buf_size) { const uint8_t *p; @@ -2505,80 +2122,129 @@ static void mpeg_decode_user_data(AVCodecContext *avctx, } } +static void mpeg_decode_gop(AVCodecContext *avctx, + const uint8_t *buf, int buf_size){ + Mpeg1Context *s1 = avctx->priv_data; + MpegEncContext *s = &s1->mpeg_enc_ctx; + + int drop_frame_flag; + int time_code_hours, time_code_minutes; + int time_code_seconds, time_code_pictures; + int closed_gop, broken_link; + + init_get_bits(&s->gb, buf, buf_size*8); + + drop_frame_flag = get_bits1(&s->gb); + + time_code_hours=get_bits(&s->gb,5); + time_code_minutes = get_bits(&s->gb,6); + skip_bits1(&s->gb);//marker bit + time_code_seconds = get_bits(&s->gb,6); + time_code_pictures = get_bits(&s->gb,6); + + closed_gop = get_bits1(&s->gb); + /*broken_link indicate that after editing the + reference frames of the first B-Frames after GOP I-Frame + are missing (open gop)*/ + broken_link = get_bits1(&s->gb); + + if(s->avctx->debug & FF_DEBUG_PICT_INFO) + av_log(s->avctx, AV_LOG_DEBUG, "GOP (%2d:%02d:%02d.[%02d]) closed_gop=%d broken_link=%d\n", + time_code_hours, time_code_minutes, time_code_seconds, + time_code_pictures, closed_gop, broken_link); +} /** - * finds the end of the current frame in the bitstream. + * Finds the end of the current frame in the bitstream. 
* @return the position of the first byte of the next frame, or -1 */ -static int mpeg1_find_frame_end(MpegEncContext *s, uint8_t *buf, int buf_size){ - ParseContext *pc= &s->parse_context; +int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size) +{ int i; - uint32_t state; - - state= pc->state; - - i=0; - if(!pc->frame_start_found){ - for(i=0; i= SLICE_MIN_START_CODE && state <= SLICE_MAX_START_CODE){ - i++; - pc->frame_start_found=1; - break; + uint32_t state= pc->state; + + /* EOF considered as end of frame */ + if (buf_size == 0) + return 0; + +/* + 0 frame start -> 1/4 + 1 first_SEQEXT -> 0/2 + 2 first field start -> 3/0 + 3 second_SEQEXT -> 2/0 + 4 searching end +*/ + + for(i=0; iframe_start_found>=0 && pc->frame_start_found<=4); + if(pc->frame_start_found&1){ + if(state == EXT_START_CODE && (buf[i]&0xF0) != 0x80) + pc->frame_start_found--; + else if(state == EXT_START_CODE+2){ + if((buf[i]&3) == 3) pc->frame_start_found= 0; + else pc->frame_start_found= (pc->frame_start_found+1)&3; } - } - } - - if(pc->frame_start_found){ - for(; iframe_start_found==0 && state >= SLICE_MIN_START_CODE && state <= SLICE_MAX_START_CODE){ + i++; + pc->frame_start_found=4; + } + if(state == SEQ_END_CODE){ + pc->state=-1; + return i+1; + } + if(pc->frame_start_found==2 && state == SEQ_START_CODE) + pc->frame_start_found= 0; + if(pc->frame_start_found<4 && state == EXT_START_CODE) + pc->frame_start_found++; + if(pc->frame_start_found == 4 && (state&0xFFFFFF00) == 0x100){ if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){ pc->frame_start_found=0; - pc->state=-1; + pc->state=-1; return i-3; } } } - } + } pc->state= state; return END_NOT_FOUND; } +static int decode_chunks(AVCodecContext *avctx, + AVFrame *picture, int *data_size, + const uint8_t *buf, int buf_size); + /* handle buffering and image synchronisation */ -static int mpeg_decode_frame(AVCodecContext *avctx, +static int mpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size, - uint8_t *buf, int buf_size) + const uint8_t *buf, int buf_size) { Mpeg1Context *s = avctx->priv_data; - uint8_t *buf_end, *buf_ptr; - int ret, start_code, input_size; AVFrame *picture = data; MpegEncContext *s2 = &s->mpeg_enc_ctx; - dprintf("fill_buffer\n"); + dprintf(avctx, "fill_buffer\n"); - *data_size = 0; + if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) { + /* special case for last picture */ + if (s2->low_delay==0 && s2->next_picture_ptr) { + *picture= *(AVFrame*)s2->next_picture_ptr; + s2->next_picture_ptr= NULL; - /* special case for last picture */ - if (buf_size == 0 && s2->low_delay==0 && s2->next_picture_ptr) { - *picture= *(AVFrame*)s2->next_picture_ptr; - s2->next_picture_ptr= NULL; - - *data_size = sizeof(AVFrame); - return 0; + *data_size = sizeof(AVFrame); + } + return buf_size; } if(s2->flags&CODEC_FLAG_TRUNCATED){ - int next= mpeg1_find_frame_end(s2, buf, buf_size); - - if( ff_combine_frame(s2, next, &buf, &buf_size) < 0 ) - return buf_size; - } - - buf_ptr = buf; - buf_end = buf + buf_size; + int next= ff_mpeg1_find_frame_end(&s2->parse_context, buf, buf_size); -#if 0 - if (s->repeat_field % 2 == 1) { + if( ff_combine_frame(&s2->parse_context, next, (const uint8_t **)&buf, &buf_size) < 0 ) + return buf_size; + } + +#if 0 + if (s->repeat_field % 2 == 1) { s->repeat_field++; //fprintf(stderr,"\nRepeating last frame: %d -> %d! 
pict: %d %d", avctx->frame_number-1, avctx->frame_number, // s2->picture_number, s->repeat_field); @@ -2592,11 +2258,37 @@ static int mpeg_decode_frame(AVCodecContext *avctx, if(s->mpeg_enc_ctx_allocated==0 && avctx->codec_tag == ff_get_fourcc("VCR2")) vcr2_init_sequence(avctx); + s->slice_count= 0; + + if(avctx->extradata && !avctx->frame_number) + decode_chunks(avctx, picture, data_size, avctx->extradata, avctx->extradata_size); + + return decode_chunks(avctx, picture, data_size, buf, buf_size); +} + +static int decode_chunks(AVCodecContext *avctx, + AVFrame *picture, int *data_size, + const uint8_t *buf, int buf_size) +{ + Mpeg1Context *s = avctx->priv_data; + MpegEncContext *s2 = &s->mpeg_enc_ctx; + const uint8_t *buf_ptr = buf; + const uint8_t *buf_end = buf + buf_size; + int ret, input_size; + for(;;) { - /* find start next code */ - start_code = find_start_code(&buf_ptr, buf_end); - if (start_code < 0){ - if(s2->pict_type != B_TYPE || avctx->hurry_up==0){ + /* find next start code */ + uint32_t start_code = -1; + buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code); + if (start_code > 0x1ff){ + if(s2->pict_type != FF_B_TYPE || avctx->skip_frame <= AVDISCARD_DEFAULT){ + if(avctx->thread_count > 1){ + int i; + + avctx->execute(avctx, slice_decode_thread, (void**)&(s2->thread_context[0]), NULL, s->slice_count); + for(i=0; islice_count; i++) + s2->error_count += s2->thread_context[i]->error_count; + } if (slice_end(avctx, picture)) { if(s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice *data_size = sizeof(AVPicture); @@ -2604,63 +2296,106 @@ static int mpeg_decode_frame(AVCodecContext *avctx, } return FFMAX(0, buf_ptr - buf - s2->parse_context.last_index); } - + input_size = buf_end - buf_ptr; if(avctx->debug & FF_DEBUG_STARTCODE){ - av_log(avctx, AV_LOG_DEBUG, "%3X at %d left %d\n", start_code, buf_ptr-buf, input_size); + av_log(avctx, AV_LOG_DEBUG, "%3X at %td left %d\n", start_code, buf_ptr-buf, input_size); } - /* prepare data for next start code */ - switch(start_code) { - case SEQ_START_CODE: - mpeg1_decode_sequence(avctx, buf_ptr, - input_size); - break; - - case PICTURE_START_CODE: - /* we have a complete image : we try to decompress it */ - mpeg1_decode_picture(avctx, - buf_ptr, input_size); - break; - case EXT_START_CODE: - mpeg_decode_extension(avctx, - buf_ptr, input_size); - break; - case USER_START_CODE: - mpeg_decode_user_data(avctx, - buf_ptr, input_size); - break; - case GOP_START_CODE: - s2->first_field=0; - break; - default: - if (start_code >= SLICE_MIN_START_CODE && - start_code <= SLICE_MAX_START_CODE) { - - /* skip b frames if we dont have reference frames */ - if(s2->last_picture_ptr==NULL && s2->pict_type==B_TYPE) break; - /* skip b frames if we are in a hurry */ - if(avctx->hurry_up && s2->pict_type==B_TYPE) break; - /* skip everything if we are in a hurry>=5 */ - if(avctx->hurry_up>=5) break; - - if (!s->mpeg_enc_ctx_allocated) break; + /* prepare data for next start code */ + switch(start_code) { + case SEQ_START_CODE: + mpeg1_decode_sequence(avctx, buf_ptr, + input_size); + break; - ret = mpeg_decode_slice(avctx, picture, - start_code, &buf_ptr, input_size); - emms_c(); + case PICTURE_START_CODE: + /* we have a complete image: we try to decompress it */ + mpeg1_decode_picture(avctx, + buf_ptr, input_size); + break; + case EXT_START_CODE: + mpeg_decode_extension(avctx, + buf_ptr, input_size); + break; + case USER_START_CODE: + mpeg_decode_user_data(avctx, + buf_ptr, input_size); + break; + case GOP_START_CODE: + 
s2->first_field=0; + mpeg_decode_gop(avctx, + buf_ptr, input_size); + break; + default: + if (start_code >= SLICE_MIN_START_CODE && + start_code <= SLICE_MAX_START_CODE) { + int mb_y= start_code - SLICE_MIN_START_CODE; - if(ret < 0){ - if(s2->resync_mb_x>=0 && s2->resync_mb_y>=0) - ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, AC_ERROR|DC_ERROR|MV_ERROR); - if(ret==DECODE_SLICE_FATAL_ERROR) return -1; - }else{ - ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x-1, s2->mb_y, AC_END|DC_END|MV_END); - } - } - break; + if(s2->last_picture_ptr==NULL){ + /* Skip B-frames if we do not have reference frames. */ + if(s2->pict_type==FF_B_TYPE) break; } + if(s2->next_picture_ptr==NULL){ + /* Skip P-frames if we do not have a reference frame or we have an invalid header. */ + if(s2->pict_type==FF_P_TYPE && (s2->first_field || s2->picture_structure==PICT_FRAME)) break; + } + /* Skip B-frames if we are in a hurry. */ + if(avctx->hurry_up && s2->pict_type==FF_B_TYPE) break; + if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==FF_B_TYPE) + ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=FF_I_TYPE) + || avctx->skip_frame >= AVDISCARD_ALL) + break; + /* Skip everything if we are in a hurry>=5. */ + if(avctx->hurry_up>=5) break; + + if (!s->mpeg_enc_ctx_allocated) break; + + if(s2->codec_id == CODEC_ID_MPEG2VIDEO){ + if(mb_y < avctx->skip_top || mb_y >= s2->mb_height - avctx->skip_bottom) + break; + } + + if(s2->first_slice){ + s2->first_slice=0; + if(mpeg_field_start(s2) < 0) + return -1; + } + if(!s2->current_picture_ptr){ + av_log(avctx, AV_LOG_ERROR, "current_picture not initialized\n"); + return -1; + } + + if(avctx->thread_count > 1){ + int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count; + if(threshold <= mb_y){ + MpegEncContext *thread_context= s2->thread_context[s->slice_count]; + + thread_context->start_mb_y= mb_y; + thread_context->end_mb_y = s2->mb_height; + if(s->slice_count){ + s2->thread_context[s->slice_count-1]->end_mb_y= mb_y; + ff_update_duplicate_context(thread_context, s2); + } + init_get_bits(&thread_context->gb, buf_ptr, input_size*8); + s->slice_count++; + } + buf_ptr += 2; //FIXME add minimum number of bytes per slice + }else{ + ret = mpeg_decode_slice(s, mb_y, &buf_ptr, input_size); + emms_c(); + + if(ret < 0){ + if(s2->resync_mb_x>=0 && s2->resync_mb_y>=0) + ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, AC_ERROR|DC_ERROR|MV_ERROR); + }else{ + ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x-1, s2->mb_y, AC_END|DC_END|MV_END); + } + } + } + break; + } } } @@ -2682,8 +2417,9 @@ AVCodec mpeg1video_decoder = { NULL, mpeg_decode_end, mpeg_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED, + CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY, .flush= ff_mpeg_flush, + .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"), }; AVCodec mpeg2video_decoder = { @@ -2695,8 +2431,9 @@ AVCodec mpeg2video_decoder = { NULL, mpeg_decode_end, mpeg_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED, + CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY, .flush= ff_mpeg_flush, + .long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"), }; //legacy decoder @@ -2709,44 +2446,21 @@ AVCodec mpegvideo_decoder = { NULL, mpeg_decode_end, mpeg_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED, + CODEC_CAP_DRAW_HORIZ_BAND | 
CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY, .flush= ff_mpeg_flush, + .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"), }; -#ifdef CONFIG_ENCODERS - -AVCodec mpeg1video_encoder = { - "mpeg1video", - CODEC_TYPE_VIDEO, - CODEC_ID_MPEG1VIDEO, - sizeof(MpegEncContext), - encode_init, - MPV_encode_picture, - MPV_encode_end, -}; - -#ifdef CONFIG_RISKY - -AVCodec mpeg2video_encoder = { - "mpeg2video", - CODEC_TYPE_VIDEO, - CODEC_ID_MPEG2VIDEO, - sizeof(MpegEncContext), - encode_init, - MPV_encode_picture, - MPV_encode_end, -}; -#endif -#endif - #ifdef HAVE_XVMC -static int mpeg_mc_decode_init(AVCodecContext *avctx){ +static av_cold int mpeg_mc_decode_init(AVCodecContext *avctx){ Mpeg1Context *s; + if( avctx->thread_count > 1) + return -1; if( !(avctx->slice_flags & SLICE_FLAG_CODED_ORDER) ) return -1; if( !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD) ){ - dprintf("mpeg12.c: XvMC decoder will work better if SLICE_FLAG_ALLOW_FIELD is set\n"); + dprintf(avctx, "mpeg12.c: XvMC decoder will work better if SLICE_FLAG_ALLOW_FIELD is set\n"); } mpeg_decode_init(avctx); s = avctx->priv_data; @@ -2766,12 +2480,9 @@ AVCodec mpeg_xvmc_decoder = { NULL, mpeg_decode_end, mpeg_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED, + CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED| CODEC_CAP_HWACCEL | CODEC_CAP_DELAY, + .flush= ff_mpeg_flush, + .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video XvMC (X-Video Motion Compensation)"), }; #endif - -/* this is ugly i know, but the alternative is too make - hundreds of vars global and prefix them with ff_mpeg1_ - which is far uglier. */ -#include "mdec.c" diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12.h b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12.h new file mode 100644 index 0000000000..1e919b4d66 --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12.h @@ -0,0 +1,59 @@ +/* + * MPEG1/2 common code + * Copyright (c) 2007 Aurelien Jacobs + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef FFMPEG_MPEG12_H +#define FFMPEG_MPEG12_H + +#include "mpegvideo.h" + +#define DC_VLC_BITS 9 +#define TEX_VLC_BITS 9 + +static VLC dc_lum_vlc; +static VLC dc_chroma_vlc; + +extern uint8_t ff_mpeg12_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3]; + +void ff_mpeg12_common_init(MpegEncContext *s); +void ff_mpeg12_init_vlcs(void); + +static inline int decode_dc(GetBitContext *gb, int component) +{ + int code, diff; + + if (component == 0) { + code = get_vlc2(gb, dc_lum_vlc.table, DC_VLC_BITS, 2); + } else { + code = get_vlc2(gb, dc_chroma_vlc.table, DC_VLC_BITS, 2); + } + if (code < 0){ + av_log(NULL, AV_LOG_ERROR, "invalid dc code at\n"); + return 0xffff; + } + if (code == 0) { + diff = 0; + } else { + diff = get_xbits(gb, code); + } + return diff; +} + +#endif /* FFMPEG_MPEG12_H */ diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12data.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12data.c new file mode 100644 index 0000000000..72324a729e --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12data.c @@ -0,0 +1,366 @@ +/* + * MPEG1/2 tables + * copyright (c) 2000,2001 Fabrice Bellard + * copyright (c) 2002-2004 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file mpeg12data.c + * MPEG1/2 tables. 
+ */ + +#include "mpeg12data.h" + +const uint16_t ff_mpeg1_default_intra_matrix[64] = { + 8, 16, 19, 22, 26, 27, 29, 34, + 16, 16, 22, 24, 27, 29, 34, 37, + 19, 22, 26, 27, 29, 34, 34, 38, + 22, 22, 26, 27, 29, 34, 37, 40, + 22, 26, 27, 29, 32, 35, 40, 48, + 26, 27, 29, 32, 35, 40, 48, 58, + 26, 27, 29, 34, 38, 46, 56, 69, + 27, 29, 35, 38, 46, 56, 69, 83 +}; + +const uint16_t ff_mpeg1_default_non_intra_matrix[64] = { + 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, +}; + +const uint16_t ff_mpeg12_vlc_dc_lum_code[12] = { + 0x4, 0x0, 0x1, 0x5, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe, 0x1ff, +}; +const unsigned char ff_mpeg12_vlc_dc_lum_bits[12] = { + 3, 2, 2, 3, 3, 4, 5, 6, 7, 8, 9, 9, +}; + +const uint16_t ff_mpeg12_vlc_dc_chroma_code[12] = { + 0x0, 0x1, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe, 0x3fe, 0x3ff, +}; +const unsigned char ff_mpeg12_vlc_dc_chroma_bits[12] = { + 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, +}; + +static const uint16_t mpeg1_vlc[113][2] = { + { 0x3, 2 }, { 0x4, 4 }, { 0x5, 5 }, { 0x6, 7 }, + { 0x26, 8 }, { 0x21, 8 }, { 0xa, 10 }, { 0x1d, 12 }, + { 0x18, 12 }, { 0x13, 12 }, { 0x10, 12 }, { 0x1a, 13 }, + { 0x19, 13 }, { 0x18, 13 }, { 0x17, 13 }, { 0x1f, 14 }, + { 0x1e, 14 }, { 0x1d, 14 }, { 0x1c, 14 }, { 0x1b, 14 }, + { 0x1a, 14 }, { 0x19, 14 }, { 0x18, 14 }, { 0x17, 14 }, + { 0x16, 14 }, { 0x15, 14 }, { 0x14, 14 }, { 0x13, 14 }, + { 0x12, 14 }, { 0x11, 14 }, { 0x10, 14 }, { 0x18, 15 }, + { 0x17, 15 }, { 0x16, 15 }, { 0x15, 15 }, { 0x14, 15 }, + { 0x13, 15 }, { 0x12, 15 }, { 0x11, 15 }, { 0x10, 15 }, + { 0x3, 3 }, { 0x6, 6 }, { 0x25, 8 }, { 0xc, 10 }, + { 0x1b, 12 }, { 0x16, 13 }, { 0x15, 13 }, { 0x1f, 15 }, + { 0x1e, 15 }, { 0x1d, 15 }, { 0x1c, 15 }, { 0x1b, 15 }, + { 0x1a, 15 }, { 0x19, 15 }, { 0x13, 16 }, { 0x12, 16 }, + { 0x11, 16 }, { 0x10, 16 }, { 0x5, 4 }, { 0x4, 7 }, + { 0xb, 10 }, { 0x14, 12 }, { 0x14, 13 }, { 0x7, 5 }, + { 0x24, 8 }, { 0x1c, 12 }, { 0x13, 13 }, { 0x6, 5 }, + { 0xf, 10 }, { 0x12, 12 }, { 0x7, 6 }, { 0x9, 10 }, + { 0x12, 13 }, { 0x5, 6 }, { 0x1e, 12 }, { 0x14, 16 }, + { 0x4, 6 }, { 0x15, 12 }, { 0x7, 7 }, { 0x11, 12 }, + { 0x5, 7 }, { 0x11, 13 }, { 0x27, 8 }, { 0x10, 13 }, + { 0x23, 8 }, { 0x1a, 16 }, { 0x22, 8 }, { 0x19, 16 }, + { 0x20, 8 }, { 0x18, 16 }, { 0xe, 10 }, { 0x17, 16 }, + { 0xd, 10 }, { 0x16, 16 }, { 0x8, 10 }, { 0x15, 16 }, + { 0x1f, 12 }, { 0x1a, 12 }, { 0x19, 12 }, { 0x17, 12 }, + { 0x16, 12 }, { 0x1f, 13 }, { 0x1e, 13 }, { 0x1d, 13 }, + { 0x1c, 13 }, { 0x1b, 13 }, { 0x1f, 16 }, { 0x1e, 16 }, + { 0x1d, 16 }, { 0x1c, 16 }, { 0x1b, 16 }, + { 0x1, 6 }, /* escape */ + { 0x2, 2 }, /* EOB */ +}; + +static const uint16_t mpeg2_vlc[113][2] = { + {0x02, 2}, {0x06, 3}, {0x07, 4}, {0x1c, 5}, + {0x1d, 5}, {0x05, 6}, {0x04, 6}, {0x7b, 7}, + {0x7c, 7}, {0x23, 8}, {0x22, 8}, {0xfa, 8}, + {0xfb, 8}, {0xfe, 8}, {0xff, 8}, {0x1f,14}, + {0x1e,14}, {0x1d,14}, {0x1c,14}, {0x1b,14}, + {0x1a,14}, {0x19,14}, {0x18,14}, {0x17,14}, + {0x16,14}, {0x15,14}, {0x14,14}, {0x13,14}, + {0x12,14}, {0x11,14}, {0x10,14}, {0x18,15}, + {0x17,15}, {0x16,15}, {0x15,15}, {0x14,15}, + {0x13,15}, {0x12,15}, {0x11,15}, {0x10,15}, + {0x02, 3}, {0x06, 5}, {0x79, 7}, {0x27, 8}, + {0x20, 8}, {0x16,13}, {0x15,13}, {0x1f,15}, + {0x1e,15}, {0x1d,15}, {0x1c,15}, {0x1b,15}, + {0x1a,15}, {0x19,15}, {0x13,16}, {0x12,16}, + {0x11,16}, {0x10,16}, {0x05, 5}, {0x07, 
7}, + {0xfc, 8}, {0x0c,10}, {0x14,13}, {0x07, 5}, + {0x26, 8}, {0x1c,12}, {0x13,13}, {0x06, 6}, + {0xfd, 8}, {0x12,12}, {0x07, 6}, {0x04, 9}, + {0x12,13}, {0x06, 7}, {0x1e,12}, {0x14,16}, + {0x04, 7}, {0x15,12}, {0x05, 7}, {0x11,12}, + {0x78, 7}, {0x11,13}, {0x7a, 7}, {0x10,13}, + {0x21, 8}, {0x1a,16}, {0x25, 8}, {0x19,16}, + {0x24, 8}, {0x18,16}, {0x05, 9}, {0x17,16}, + {0x07, 9}, {0x16,16}, {0x0d,10}, {0x15,16}, + {0x1f,12}, {0x1a,12}, {0x19,12}, {0x17,12}, + {0x16,12}, {0x1f,13}, {0x1e,13}, {0x1d,13}, + {0x1c,13}, {0x1b,13}, {0x1f,16}, {0x1e,16}, + {0x1d,16}, {0x1c,16}, {0x1b,16}, + {0x01,6}, /* escape */ + {0x06,4}, /* EOB */ +}; + +static const int8_t mpeg1_level[111] = { + 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, + 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 1, 2, 3, 4, 5, 1, + 2, 3, 4, 1, 2, 3, 1, 2, + 3, 1, 2, 3, 1, 2, 1, 2, + 1, 2, 1, 2, 1, 2, 1, 2, + 1, 2, 1, 2, 1, 2, 1, 2, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, +}; + +static const int8_t mpeg1_run[111] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 2, 3, + 3, 3, 3, 4, 4, 4, 5, 5, + 5, 6, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, + 13, 13, 14, 14, 15, 15, 16, 16, + 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, +}; + +RLTable ff_rl_mpeg1 = { + 111, + 111, + mpeg1_vlc, + mpeg1_run, + mpeg1_level, +}; + +RLTable ff_rl_mpeg2 = { + 111, + 111, + mpeg2_vlc, + mpeg1_run, + mpeg1_level, +}; + +const uint8_t ff_mpeg12_mbAddrIncrTable[36][2] = { + {0x1, 1}, + {0x3, 3}, + {0x2, 3}, + {0x3, 4}, + {0x2, 4}, + {0x3, 5}, + {0x2, 5}, + {0x7, 7}, + {0x6, 7}, + {0xb, 8}, + {0xa, 8}, + {0x9, 8}, + {0x8, 8}, + {0x7, 8}, + {0x6, 8}, + {0x17, 10}, + {0x16, 10}, + {0x15, 10}, + {0x14, 10}, + {0x13, 10}, + {0x12, 10}, + {0x23, 11}, + {0x22, 11}, + {0x21, 11}, + {0x20, 11}, + {0x1f, 11}, + {0x1e, 11}, + {0x1d, 11}, + {0x1c, 11}, + {0x1b, 11}, + {0x1a, 11}, + {0x19, 11}, + {0x18, 11}, + {0x8, 11}, /* escape */ + {0xf, 11}, /* stuffing */ + {0x0, 8}, /* end (and 15 more 0 bits should follow) */ +}; + +const uint8_t ff_mpeg12_mbPatTable[64][2] = { + {0x1, 9}, + {0xb, 5}, + {0x9, 5}, + {0xd, 6}, + {0xd, 4}, + {0x17, 7}, + {0x13, 7}, + {0x1f, 8}, + {0xc, 4}, + {0x16, 7}, + {0x12, 7}, + {0x1e, 8}, + {0x13, 5}, + {0x1b, 8}, + {0x17, 8}, + {0x13, 8}, + {0xb, 4}, + {0x15, 7}, + {0x11, 7}, + {0x1d, 8}, + {0x11, 5}, + {0x19, 8}, + {0x15, 8}, + {0x11, 8}, + {0xf, 6}, + {0xf, 8}, + {0xd, 8}, + {0x3, 9}, + {0xf, 5}, + {0xb, 8}, + {0x7, 8}, + {0x7, 9}, + {0xa, 4}, + {0x14, 7}, + {0x10, 7}, + {0x1c, 8}, + {0xe, 6}, + {0xe, 8}, + {0xc, 8}, + {0x2, 9}, + {0x10, 5}, + {0x18, 8}, + {0x14, 8}, + {0x10, 8}, + {0xe, 5}, + {0xa, 8}, + {0x6, 8}, + {0x6, 9}, + {0x12, 5}, + {0x1a, 8}, + {0x16, 8}, + {0x12, 8}, + {0xd, 5}, + {0x9, 8}, + {0x5, 8}, + {0x5, 9}, + {0xc, 5}, + {0x8, 8}, + {0x4, 8}, + {0x4, 9}, + {0x7, 3}, + {0xa, 5}, + {0x8, 5}, + {0xc, 6} +}; + +const uint8_t ff_mpeg12_mbMotionVectorTable[17][2] = { +{ 0x1, 1 }, +{ 0x1, 2 }, +{ 0x1, 3 }, +{ 0x1, 4 }, +{ 0x3, 6 }, +{ 0x5, 7 }, +{ 0x4, 7 }, +{ 0x3, 7 }, +{ 0xb, 9 }, +{ 0xa, 9 }, +{ 0x9, 9 }, +{ 0x11, 10 }, +{ 0x10, 10 }, +{ 0xf, 10 }, +{ 0xe, 10 }, +{ 0xd, 10 }, +{ 0xc, 10 }, +}; + +const AVRational ff_frame_rate_tab[] = { + { 0, 0}, + {24000, 1001}, + { 24, 1}, + { 25, 1}, + {30000, 1001}, + 
{ 30, 1}, + { 50, 1}, + {60000, 1001}, + { 60, 1}, + // Xing's 15fps: (9) + { 15, 1}, + // libmpeg3's "Unofficial economy rates": (10-13) + { 5, 1}, + { 10, 1}, + { 12, 1}, + { 15, 1}, + { 0, 0}, +}; + +const float ff_mpeg1_aspect[16]={ + 0.0000, + 1.0000, + 0.6735, + 0.7031, + + 0.7615, + 0.8055, + 0.8437, + 0.8935, + + 0.9157, + 0.9815, + 1.0255, + 1.0695, + + 1.0950, + 1.1575, + 1.2015, +}; + +const AVRational ff_mpeg2_aspect[16]={ + {0,1}, + {1,1}, + {4,3}, + {16,9}, + {221,100}, + {0,1}, + {0,1}, + {0,1}, + {0,1}, + {0,1}, + {0,1}, + {0,1}, + {0,1}, + {0,1}, + {0,1}, + {0,1}, +}; diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12data.h b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12data.h index 42b3d49a1f..e23ec83188 100644 --- a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12data.h +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12data.h @@ -1,442 +1,56 @@ +/* + * MPEG1/2 tables + * copyright (c) 2000,2001 Fabrice Bellard + * copyright (c) 2002-2004 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + /** * @file mpeg12data.h * MPEG1/2 tables. 
*/ -const int16_t ff_mpeg1_default_intra_matrix[64] = { - 8, 16, 19, 22, 26, 27, 29, 34, - 16, 16, 22, 24, 27, 29, 34, 37, - 19, 22, 26, 27, 29, 34, 34, 38, - 22, 22, 26, 27, 29, 34, 37, 40, - 22, 26, 27, 29, 32, 35, 40, 48, - 26, 27, 29, 32, 35, 40, 48, 58, - 26, 27, 29, 34, 38, 46, 56, 69, - 27, 29, 35, 38, 46, 56, 69, 83 -}; +#ifndef FFMPEG_MPEG12DATA_H +#define FFMPEG_MPEG12DATA_H -const int16_t ff_mpeg1_default_non_intra_matrix[64] = { - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, -}; +#include +#include "rational.h" +#include "rl.h" -static const unsigned char vlc_dc_table[256] = { - 0, 1, 2, 2, - 3, 3, 3, 3, - 4, 4, 4, 4, 4, 4, 4, 4, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, +extern const uint16_t ff_mpeg1_default_intra_matrix[64]; +extern const uint16_t ff_mpeg1_default_non_intra_matrix[64]; - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, +extern const uint16_t ff_mpeg12_vlc_dc_lum_code[12]; +extern const unsigned char ff_mpeg12_vlc_dc_lum_bits[12]; +extern const uint16_t ff_mpeg12_vlc_dc_chroma_code[12]; +extern const unsigned char ff_mpeg12_vlc_dc_chroma_bits[12]; - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, -}; +extern RLTable ff_rl_mpeg1; +extern RLTable ff_rl_mpeg2; -static const uint16_t vlc_dc_lum_code[12] = { - 0x4, 0x0, 0x1, 0x5, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe, 0x1ff, -}; -static const unsigned char vlc_dc_lum_bits[12] = { - 3, 2, 2, 3, 3, 4, 5, 6, 7, 8, 9, 9, -}; +extern const uint8_t ff_mpeg12_mbAddrIncrTable[36][2]; +extern const uint8_t ff_mpeg12_mbPatTable[64][2]; -const uint16_t vlc_dc_chroma_code[12] = { - 0x0, 0x1, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe, 0x3fe, 0x3ff, -}; -const unsigned char vlc_dc_chroma_bits[12] = { - 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, -}; +extern const uint8_t ff_mpeg12_mbMotionVectorTable[17][2]; -static const uint16_t mpeg1_vlc[113][2] = { - { 0x3, 2 }, { 0x4, 4 }, { 0x5, 5 }, { 0x6, 7 }, - { 0x26, 8 }, { 0x21, 8 }, { 0xa, 10 }, { 0x1d, 12 }, - { 0x18, 12 }, { 0x13, 12 }, { 0x10, 12 }, { 0x1a, 13 }, - { 0x19, 13 }, { 0x18, 13 }, { 0x17, 13 }, { 0x1f, 14 }, - { 0x1e, 14 }, { 0x1d, 14 }, { 0x1c, 14 }, { 0x1b, 14 }, - { 0x1a, 14 }, { 0x19, 14 }, { 0x18, 14 }, { 0x17, 14 }, - { 0x16, 14 }, { 0x15, 14 }, { 0x14, 14 }, { 0x13, 14 }, - { 0x12, 14 }, { 0x11, 14 }, { 0x10, 14 }, { 0x18, 15 }, - { 0x17, 15 }, { 0x16, 15 }, { 0x15, 15 }, { 0x14, 15 }, - { 0x13, 15 }, { 0x12, 15 }, { 0x11, 15 }, { 0x10, 15 }, - { 0x3, 3 }, { 0x6, 6 }, { 0x25, 8 }, { 0xc, 10 }, - { 0x1b, 12 }, { 0x16, 13 }, { 0x15, 13 }, { 0x1f, 15 }, - { 0x1e, 15 }, { 0x1d, 15 }, { 0x1c, 15 }, { 0x1b, 15 }, - { 0x1a, 15 }, { 0x19, 15 }, { 0x13, 16 }, { 0x12, 16 }, - { 0x11, 16 }, { 0x10, 16 }, { 0x5, 4 }, { 0x4, 7 }, - { 0xb, 10 }, { 0x14, 12 }, 
{ 0x14, 13 }, { 0x7, 5 }, - { 0x24, 8 }, { 0x1c, 12 }, { 0x13, 13 }, { 0x6, 5 }, - { 0xf, 10 }, { 0x12, 12 }, { 0x7, 6 }, { 0x9, 10 }, - { 0x12, 13 }, { 0x5, 6 }, { 0x1e, 12 }, { 0x14, 16 }, - { 0x4, 6 }, { 0x15, 12 }, { 0x7, 7 }, { 0x11, 12 }, - { 0x5, 7 }, { 0x11, 13 }, { 0x27, 8 }, { 0x10, 13 }, - { 0x23, 8 }, { 0x1a, 16 }, { 0x22, 8 }, { 0x19, 16 }, - { 0x20, 8 }, { 0x18, 16 }, { 0xe, 10 }, { 0x17, 16 }, - { 0xd, 10 }, { 0x16, 16 }, { 0x8, 10 }, { 0x15, 16 }, - { 0x1f, 12 }, { 0x1a, 12 }, { 0x19, 12 }, { 0x17, 12 }, - { 0x16, 12 }, { 0x1f, 13 }, { 0x1e, 13 }, { 0x1d, 13 }, - { 0x1c, 13 }, { 0x1b, 13 }, { 0x1f, 16 }, { 0x1e, 16 }, - { 0x1d, 16 }, { 0x1c, 16 }, { 0x1b, 16 }, - { 0x1, 6 }, /* escape */ - { 0x2, 2 }, /* EOB */ -}; +extern const AVRational ff_frame_rate_tab[]; -static const uint16_t mpeg2_vlc[113][2] = { - {0x02, 2}, {0x06, 3}, {0x07, 4}, {0x1c, 5}, - {0x1d, 5}, {0x05, 6}, {0x04, 6}, {0x7b, 7}, - {0x7c, 7}, {0x23, 8}, {0x22, 8}, {0xfa, 8}, - {0xfb, 8}, {0xfe, 8}, {0xff, 8}, {0x1f,14}, - {0x1e,14}, {0x1d,14}, {0x1c,14}, {0x1b,14}, - {0x1a,14}, {0x19,14}, {0x18,14}, {0x17,14}, - {0x16,14}, {0x15,14}, {0x14,14}, {0x13,14}, - {0x12,14}, {0x11,14}, {0x10,14}, {0x18,15}, - {0x17,15}, {0x16,15}, {0x15,15}, {0x14,15}, - {0x13,15}, {0x12,15}, {0x11,15}, {0x10,15}, - {0x02, 3}, {0x06, 5}, {0x79, 7}, {0x27, 8}, - {0x20, 8}, {0x16,13}, {0x15,13}, {0x1f,15}, - {0x1e,15}, {0x1d,15}, {0x1c,15}, {0x1b,15}, - {0x1a,15}, {0x19,15}, {0x13,16}, {0x12,16}, - {0x11,16}, {0x10,16}, {0x05, 5}, {0x07, 7}, - {0xfc, 8}, {0x0c,10}, {0x14,13}, {0x07, 5}, - {0x26, 8}, {0x1c,12}, {0x13,13}, {0x06, 6}, - {0xfd, 8}, {0x12,12}, {0x07, 6}, {0x04, 9}, - {0x12,13}, {0x06, 7}, {0x1e,12}, {0x14,16}, - {0x04, 7}, {0x15,12}, {0x05, 7}, {0x11,12}, - {0x78, 7}, {0x11,13}, {0x7a, 7}, {0x10,13}, - {0x21, 8}, {0x1a,16}, {0x25, 8}, {0x19,16}, - {0x24, 8}, {0x18,16}, {0x05, 9}, {0x17,16}, - {0x07, 9}, {0x16,16}, {0x0d,10}, {0x15,16}, - {0x1f,12}, {0x1a,12}, {0x19,12}, {0x17,12}, - {0x16,12}, {0x1f,13}, {0x1e,13}, {0x1d,13}, - {0x1c,13}, {0x1b,13}, {0x1f,16}, {0x1e,16}, - {0x1d,16}, {0x1c,16}, {0x1b,16}, - {0x01,6}, /* escape */ - {0x06,4}, /* EOB */ -}; +extern const float ff_mpeg1_aspect[16]; +extern const AVRational ff_mpeg2_aspect[16]; -static const int8_t mpeg1_level[111] = { - 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - 33, 34, 35, 36, 37, 38, 39, 40, - 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 1, 2, 3, 4, 5, 1, - 2, 3, 4, 1, 2, 3, 1, 2, - 3, 1, 2, 3, 1, 2, 1, 2, - 1, 2, 1, 2, 1, 2, 1, 2, - 1, 2, 1, 2, 1, 2, 1, 2, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, -}; - -static const int8_t mpeg1_run[111] = { - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 2, 2, 2, 2, 2, 3, - 3, 3, 3, 4, 4, 4, 5, 5, - 5, 6, 6, 6, 7, 7, 8, 8, - 9, 9, 10, 10, 11, 11, 12, 12, - 13, 13, 14, 14, 15, 15, 16, 16, - 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, -}; - -static RLTable rl_mpeg1 = { - 111, - 111, - mpeg1_vlc, - mpeg1_run, - mpeg1_level, -}; - -static RLTable rl_mpeg2 = { - 111, - 111, - mpeg2_vlc, - mpeg1_run, - mpeg1_level, -}; - -static const uint8_t mbAddrIncrTable[36][2] = { - {0x1, 1}, - {0x3, 3}, - {0x2, 3}, - {0x3, 4}, - {0x2, 4}, - {0x3, 5}, - {0x2, 5}, - {0x7, 7}, - {0x6, 7}, - {0xb, 8}, - {0xa, 8}, - {0x9, 8}, - {0x8, 8}, - {0x7, 8}, - {0x6, 8}, - {0x17, 10}, 
- {0x16, 10}, - {0x15, 10}, - {0x14, 10}, - {0x13, 10}, - {0x12, 10}, - {0x23, 11}, - {0x22, 11}, - {0x21, 11}, - {0x20, 11}, - {0x1f, 11}, - {0x1e, 11}, - {0x1d, 11}, - {0x1c, 11}, - {0x1b, 11}, - {0x1a, 11}, - {0x19, 11}, - {0x18, 11}, - {0x8, 11}, /* escape */ - {0xf, 11}, /* stuffing */ - {0x0, 8}, /* end (and 15 more 0 bits should follow) */ -}; - -static const uint8_t mbPatTable[63][2] = { - {0xb, 5}, - {0x9, 5}, - {0xd, 6}, - {0xd, 4}, - {0x17, 7}, - {0x13, 7}, - {0x1f, 8}, - {0xc, 4}, - {0x16, 7}, - {0x12, 7}, - {0x1e, 8}, - {0x13, 5}, - {0x1b, 8}, - {0x17, 8}, - {0x13, 8}, - {0xb, 4}, - {0x15, 7}, - {0x11, 7}, - {0x1d, 8}, - {0x11, 5}, - {0x19, 8}, - {0x15, 8}, - {0x11, 8}, - {0xf, 6}, - {0xf, 8}, - {0xd, 8}, - {0x3, 9}, - {0xf, 5}, - {0xb, 8}, - {0x7, 8}, - {0x7, 9}, - {0xa, 4}, - {0x14, 7}, - {0x10, 7}, - {0x1c, 8}, - {0xe, 6}, - {0xe, 8}, - {0xc, 8}, - {0x2, 9}, - {0x10, 5}, - {0x18, 8}, - {0x14, 8}, - {0x10, 8}, - {0xe, 5}, - {0xa, 8}, - {0x6, 8}, - {0x6, 9}, - {0x12, 5}, - {0x1a, 8}, - {0x16, 8}, - {0x12, 8}, - {0xd, 5}, - {0x9, 8}, - {0x5, 8}, - {0x5, 9}, - {0xc, 5}, - {0x8, 8}, - {0x4, 8}, - {0x4, 9}, - {0x7, 3}, - {0xa, 5}, - {0x8, 5}, - {0xc, 6} -}; - -#define MB_TYPE_ZERO_MV 0x20000000 -#define IS_ZERO_MV(a) ((a)&MB_TYPE_ZERO_MV) - -static const uint8_t table_mb_ptype[7][2] = { - { 3, 5 }, // 0x01 MB_INTRA - { 1, 2 }, // 0x02 MB_PAT - { 1, 3 }, // 0x08 MB_FOR - { 1, 1 }, // 0x0A MB_FOR|MB_PAT - { 1, 6 }, // 0x11 MB_QUANT|MB_INTRA - { 1, 5 }, // 0x12 MB_QUANT|MB_PAT - { 2, 5 }, // 0x1A MB_QUANT|MB_FOR|MB_PAT -}; - -static const uint32_t ptype2mb_type[7] = { - MB_TYPE_INTRA, - MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16, - MB_TYPE_L0, - MB_TYPE_L0 | MB_TYPE_CBP, - MB_TYPE_QUANT | MB_TYPE_INTRA, - MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16, - MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP, -}; - -static const uint8_t table_mb_btype[11][2] = { - { 3, 5 }, // 0x01 MB_INTRA - { 2, 3 }, // 0x04 MB_BACK - { 3, 3 }, // 0x06 MB_BACK|MB_PAT - { 2, 4 }, // 0x08 MB_FOR - { 3, 4 }, // 0x0A MB_FOR|MB_PAT - { 2, 2 }, // 0x0C MB_FOR|MB_BACK - { 3, 2 }, // 0x0E MB_FOR|MB_BACK|MB_PAT - { 1, 6 }, // 0x11 MB_QUANT|MB_INTRA - { 2, 6 }, // 0x16 MB_QUANT|MB_BACK|MB_PAT - { 3, 6 }, // 0x1A MB_QUANT|MB_FOR|MB_PAT - { 2, 5 }, // 0x1E MB_QUANT|MB_FOR|MB_BACK|MB_PAT -}; - -static const uint32_t btype2mb_type[11] = { - MB_TYPE_INTRA, - MB_TYPE_L1, - MB_TYPE_L1 | MB_TYPE_CBP, - MB_TYPE_L0, - MB_TYPE_L0 | MB_TYPE_CBP, - MB_TYPE_L0L1, - MB_TYPE_L0L1 | MB_TYPE_CBP, - MB_TYPE_QUANT | MB_TYPE_INTRA, - MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP, - MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP, - MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP, -}; - -static const uint8_t mbMotionVectorTable[17][2] = { -{ 0x1, 1 }, -{ 0x1, 2 }, -{ 0x1, 3 }, -{ 0x1, 4 }, -{ 0x3, 6 }, -{ 0x5, 7 }, -{ 0x4, 7 }, -{ 0x3, 7 }, -{ 0xb, 9 }, -{ 0xa, 9 }, -{ 0x9, 9 }, -{ 0x11, 10 }, -{ 0x10, 10 }, -{ 0xf, 10 }, -{ 0xe, 10 }, -{ 0xd, 10 }, -{ 0xc, 10 }, -}; - -#define MPEG1_FRAME_RATE_BASE 1001 - -static const int frame_rate_tab[16] = { - 0, - 24000, - 24024, - 25025, - 30000, - 30030, - 50050, - 60000, - 60060, - // Xing's 15fps: (9) - 15015, - // libmpeg3's "Unofficial economy rates": (10-13) - 5005, - 10010, - 12012, - 15015, - // random, just to avoid segfault !never encode these - 25025, - 25025, -}; - -static const uint8_t non_linear_qscale[32] = { - 0, 1, 2, 3, 4, 5, 6, 7, - 8,10,12,14,16,18,20,22, - 24,28,32,36,40,44,48,52, - 56,64,72,80,88,96,104,112, -}; - -uint8_t ff_mpeg1_dc_scale_table[128]={ 
// MN: mpeg2 really can have such large qscales? -// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, -}; - -static const float mpeg1_aspect[16]={ - 0.0000, - 1.0000, - 0.6735, - 0.7031, - - 0.7615, - 0.8055, - 0.8437, - 0.8935, - - 0.9157, - 0.9815, - 1.0255, - 1.0695, - - 1.0950, - 1.1575, - 1.2015, -}; - -static const AVRational mpeg2_aspect[16]={ - {0,1}, - {1,1}, - {4,3}, - {16,9}, - {221,100}, - {0,1}, - {0,1}, - {0,1}, - {0,1}, - {0,1}, - {0,1}, - {0,1}, - {0,1}, - {0,1}, - {0,1}, - {0,1}, -}; - -static const uint8_t svcd_scan_offset_placeholder[14]={ - 0x10, 0x0E, - 0x00, 0x80, 0x81, - 0x00, 0x80, 0x81, - 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, -}; +#endif /* FFMPEG_MPEG12DATA_H */ diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12decdata.h b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12decdata.h new file mode 100644 index 0000000000..c6ba2bb4d9 --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12decdata.h @@ -0,0 +1,124 @@ +/* + * MPEG1/2 decoder tables + * copyright (c) 2000,2001 Fabrice Bellard + * copyright (c) 2002-2004 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file mpeg12decdata.h + * MPEG1/2 decoder tables. 
+ */ + +#ifndef FFMPEG_MPEG12DECDATA_H +#define FFMPEG_MPEG12DECDATA_H + +#include +#include "mpegvideo.h" + + +#define MB_TYPE_ZERO_MV 0x20000000 +#define IS_ZERO_MV(a) ((a)&MB_TYPE_ZERO_MV) + +static const uint8_t table_mb_ptype[7][2] = { + { 3, 5 }, // 0x01 MB_INTRA + { 1, 2 }, // 0x02 MB_PAT + { 1, 3 }, // 0x08 MB_FOR + { 1, 1 }, // 0x0A MB_FOR|MB_PAT + { 1, 6 }, // 0x11 MB_QUANT|MB_INTRA + { 1, 5 }, // 0x12 MB_QUANT|MB_PAT + { 2, 5 }, // 0x1A MB_QUANT|MB_FOR|MB_PAT +}; + +static const uint32_t ptype2mb_type[7] = { + MB_TYPE_INTRA, + MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16, + MB_TYPE_L0, + MB_TYPE_L0 | MB_TYPE_CBP, + MB_TYPE_QUANT | MB_TYPE_INTRA, + MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16, + MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP, +}; + +static const uint8_t table_mb_btype[11][2] = { + { 3, 5 }, // 0x01 MB_INTRA + { 2, 3 }, // 0x04 MB_BACK + { 3, 3 }, // 0x06 MB_BACK|MB_PAT + { 2, 4 }, // 0x08 MB_FOR + { 3, 4 }, // 0x0A MB_FOR|MB_PAT + { 2, 2 }, // 0x0C MB_FOR|MB_BACK + { 3, 2 }, // 0x0E MB_FOR|MB_BACK|MB_PAT + { 1, 6 }, // 0x11 MB_QUANT|MB_INTRA + { 2, 6 }, // 0x16 MB_QUANT|MB_BACK|MB_PAT + { 3, 6 }, // 0x1A MB_QUANT|MB_FOR|MB_PAT + { 2, 5 }, // 0x1E MB_QUANT|MB_FOR|MB_BACK|MB_PAT +}; + +static const uint32_t btype2mb_type[11] = { + MB_TYPE_INTRA, + MB_TYPE_L1, + MB_TYPE_L1 | MB_TYPE_CBP, + MB_TYPE_L0, + MB_TYPE_L0 | MB_TYPE_CBP, + MB_TYPE_L0L1, + MB_TYPE_L0L1 | MB_TYPE_CBP, + MB_TYPE_QUANT | MB_TYPE_INTRA, + MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP, + MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP, + MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP, +}; + +static const uint8_t non_linear_qscale[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8,10,12,14,16,18,20,22, + 24,28,32,36,40,44,48,52, + 56,64,72,80,88,96,104,112, +}; + +static const uint8_t mpeg2_dc_scale_table1[128]={ +// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, +}; + +static const uint8_t mpeg2_dc_scale_table2[128]={ +// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, +}; + +static const uint8_t mpeg2_dc_scale_table3[128]={ +// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +}; + +static const uint8_t * const mpeg2_dc_scale_table[4]={ + ff_mpeg1_dc_scale_table, + mpeg2_dc_scale_table1, + mpeg2_dc_scale_table2, + 
mpeg2_dc_scale_table3, +}; + +#endif /* FFMPEG_MPEG12DECDATA_H */ diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12enc.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12enc.c new file mode 100644 index 0000000000..6ef7e1ba75 --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg12enc.c @@ -0,0 +1,956 @@ +/* + * MPEG1/2 encoder + * Copyright (c) 2000,2001 Fabrice Bellard. + * Copyright (c) 2002-2004 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file mpeg12enc.c + * MPEG1/2 encoder + */ + +#include "avcodec.h" +#include "dsputil.h" +#include "mpegvideo.h" + +#include "mpeg12.h" +#include "mpeg12data.h" +#include "bytestream.h" + + +static const uint8_t inv_non_linear_qscale[13] = { + 0, 2, 4, 6, 8, + 9,10,11,12,13,14,15,16, +}; + +static const uint8_t svcd_scan_offset_placeholder[14] = { + 0x10, 0x0E, + 0x00, 0x80, 0x81, + 0x00, 0x80, 0x81, + 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, +}; + +static void mpeg1_encode_block(MpegEncContext *s, + DCTELEM *block, + int component); +static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code); // RAL: f_code parameter added + +static uint8_t mv_penalty[MAX_FCODE+1][MAX_MV*2+1]; +static uint8_t fcode_tab[MAX_MV*2+1]; + +static uint8_t uni_mpeg1_ac_vlc_len [64*64*2]; +static uint8_t uni_mpeg2_ac_vlc_len [64*64*2]; + +/* simple include everything table for dc, first byte is bits number next 3 are code*/ +static uint32_t mpeg1_lum_dc_uni[512]; +static uint32_t mpeg1_chr_dc_uni[512]; + +static uint8_t mpeg1_index_run[2][64]; +static int8_t mpeg1_max_level[2][64]; + +static void init_uni_ac_vlc(RLTable *rl, uint8_t *uni_ac_vlc_len){ + int i; + + for(i=0; i<128; i++){ + int level= i-64; + int run; + for(run=0; run<64; run++){ + int len, bits, code; + + int alevel= FFABS(level); + int sign= (level>>31)&1; + + if (alevel > rl->max_level[0][run]) + code= 111; /*rl->n*/ + else + code= rl->index_run[0][run] + alevel - 1; + + if (code < 111 /* rl->n */) { + /* store the vlc & sign at once */ + len= rl->table_vlc[code][1]+1; + bits= (rl->table_vlc[code][0]<<1) + sign; + } else { + len= rl->table_vlc[111/*rl->n*/][1]+6; + bits= rl->table_vlc[111/*rl->n*/][0]<<6; + + bits|= run; + if (alevel < 128) { + bits<<=8; len+=8; + bits|= level & 0xff; + } else { + bits<<=16; len+=16; + bits|= level & 0xff; + if (level < 0) { + bits|= 0x8001 + level + 255; + } else { + bits|= level & 0xffff; + } + } + } + + uni_ac_vlc_len [UNI_AC_ENC_INDEX(run, i)]= len; + } + } +} + + +static int find_frame_rate_index(MpegEncContext *s){ + int i; + int64_t dmin= INT64_MAX; + int64_t d; + + for(i=1;i<14;i++) { + int64_t n0= 1001LL/ff_frame_rate_tab[i].den*ff_frame_rate_tab[i].num*s->avctx->time_base.num; + int64_t n1= 1001LL*s->avctx->time_base.den; + if(s->avctx->strict_std_compliance > 
FF_COMPLIANCE_INOFFICIAL && i>=9) break; + + d = FFABS(n0 - n1); + if(d < dmin){ + dmin=d; + s->frame_rate_index= i; + } + } + if(dmin) + return -1; + else + return 0; +} + +static av_cold int encode_init(AVCodecContext *avctx) +{ + MpegEncContext *s = avctx->priv_data; + + if(MPV_encode_init(avctx) < 0) + return -1; + + if(find_frame_rate_index(s) < 0){ + if(s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL){ + av_log(avctx, AV_LOG_ERROR, "MPEG1/2 does not support %d/%d fps\n", avctx->time_base.den, avctx->time_base.num); + return -1; + }else{ + av_log(avctx, AV_LOG_INFO, "MPEG1/2 does not support %d/%d fps, there may be AV sync issues\n", avctx->time_base.den, avctx->time_base.num); + } + } + + if(avctx->profile == FF_PROFILE_UNKNOWN){ + if(avctx->level != FF_LEVEL_UNKNOWN){ + av_log(avctx, AV_LOG_ERROR, "Set profile and level\n"); + return -1; + } + avctx->profile = s->chroma_format == CHROMA_420 ? 4 : 0; /* Main or 4:2:2 */ + } + + if(avctx->level == FF_LEVEL_UNKNOWN){ + if(avctx->profile == 0){ /* 4:2:2 */ + if(avctx->width <= 720 && avctx->height <= 608) avctx->level = 5; /* Main */ + else avctx->level = 2; /* High */ + }else{ + if(avctx->profile != 1 && s->chroma_format != CHROMA_420){ + av_log(avctx, AV_LOG_ERROR, "Only High(1) and 4:2:2(0) profiles support 4:2:2 color sampling\n"); + return -1; + } + if(avctx->width <= 720 && avctx->height <= 576) avctx->level = 8; /* Main */ + else if(avctx->width <= 1440) avctx->level = 6; /* High 1440 */ + else avctx->level = 4; /* High */ + } + } + + if((avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE) && s->frame_rate_index != 4){ + av_log(avctx, AV_LOG_ERROR, "Drop frame time code only allowed with 1001/30000 fps\n"); + return -1; + } + + return 0; +} + +static void put_header(MpegEncContext *s, int header) +{ + align_put_bits(&s->pb); + put_bits(&s->pb, 16, header>>16); + put_sbits(&s->pb, 16, header); +} + +/* put sequence header if needed */ +static void mpeg1_encode_sequence_header(MpegEncContext *s) +{ + unsigned int vbv_buffer_size; + unsigned int fps, v; + int i; + uint64_t time_code; + float best_aspect_error= 1E10; + float aspect_ratio= av_q2d(s->avctx->sample_aspect_ratio); + int constraint_parameter_flag; + + if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA) + + if (s->current_picture.key_frame) { + AVRational framerate= ff_frame_rate_tab[s->frame_rate_index]; + + /* mpeg1 header repeated every gop */ + put_header(s, SEQ_START_CODE); + + put_sbits(&s->pb, 12, s->width ); + put_sbits(&s->pb, 12, s->height); + + for(i=1; i<15; i++){ + float error= aspect_ratio; + if(s->codec_id == CODEC_ID_MPEG1VIDEO || i <=1) + error-= 1.0/ff_mpeg1_aspect[i]; + else + error-= av_q2d(ff_mpeg2_aspect[i])*s->height/s->width; + + error= FFABS(error); + + if(error < best_aspect_error){ + best_aspect_error= error; + s->aspect_ratio_info= i; + } + } + + put_bits(&s->pb, 4, s->aspect_ratio_info); + put_bits(&s->pb, 4, s->frame_rate_index); + + if(s->avctx->rc_max_rate){ + v = (s->avctx->rc_max_rate + 399) / 400; + if (v > 0x3ffff && s->codec_id == CODEC_ID_MPEG1VIDEO) + v = 0x3ffff; + }else{ + v= 0x3FFFF; + } + + if(s->avctx->rc_buffer_size) + vbv_buffer_size = s->avctx->rc_buffer_size; + else + /* VBV calculation: Scaled so that a VCD has the proper VBV size of 40 kilobytes */ + vbv_buffer_size = (( 20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024; + vbv_buffer_size= (vbv_buffer_size + 16383) / 16384; + + put_sbits(&s->pb, 18, v); + put_bits(&s->pb, 1, 1); /* marker */ + put_sbits(&s->pb, 10, vbv_buffer_size); + + 
constraint_parameter_flag= + s->width <= 768 && s->height <= 576 && + s->mb_width * s->mb_height <= 396 && + s->mb_width * s->mb_height * framerate.num <= framerate.den*396*25 && + framerate.num <= framerate.den*30 && + s->avctx->me_range && s->avctx->me_range < 128 && + vbv_buffer_size <= 20 && + v <= 1856000/400 && + s->codec_id == CODEC_ID_MPEG1VIDEO; + + put_bits(&s->pb, 1, constraint_parameter_flag); + + ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix); + ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix); + + if(s->codec_id == CODEC_ID_MPEG2VIDEO){ + put_header(s, EXT_START_CODE); + put_bits(&s->pb, 4, 1); //seq ext + + put_bits(&s->pb, 1, s->avctx->profile == 0); //escx 1 for 4:2:2 profile */ + + put_bits(&s->pb, 3, s->avctx->profile); //profile + put_bits(&s->pb, 4, s->avctx->level); //level + + put_bits(&s->pb, 1, s->progressive_sequence); + put_bits(&s->pb, 2, s->chroma_format); + put_bits(&s->pb, 2, s->width >>12); + put_bits(&s->pb, 2, s->height>>12); + put_bits(&s->pb, 12, v>>18); //bitrate ext + put_bits(&s->pb, 1, 1); //marker + put_bits(&s->pb, 8, vbv_buffer_size >>10); //vbv buffer ext + put_bits(&s->pb, 1, s->low_delay); + put_bits(&s->pb, 2, 0); // frame_rate_ext_n + put_bits(&s->pb, 5, 0); // frame_rate_ext_d + } + + put_header(s, GOP_START_CODE); + put_bits(&s->pb, 1, !!(s->avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE)); /* drop frame flag */ + /* time code : we must convert from the real frame rate to a + fake mpeg frame rate in case of low frame rate */ + fps = (framerate.num + framerate.den/2)/ framerate.den; + time_code = s->current_picture_ptr->coded_picture_number + s->avctx->timecode_frame_start; + + s->gop_picture_number = s->current_picture_ptr->coded_picture_number; + if (s->avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE) { + /* only works for NTSC 29.97 */ + int d = time_code / 17982; + int m = time_code % 17982; + //if (m < 2) m += 2; /* not needed since -2,-1 / 1798 in C returns 0 */ + time_code += 18 * d + 2 * ((m - 2) / 1798); + } + put_bits(&s->pb, 5, (uint32_t)((time_code / (fps * 3600)) % 24)); + put_bits(&s->pb, 6, (uint32_t)((time_code / (fps * 60)) % 60)); + put_bits(&s->pb, 1, 1); + put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60)); + put_bits(&s->pb, 6, (uint32_t)((time_code % fps))); + put_bits(&s->pb, 1, !!(s->flags & CODEC_FLAG_CLOSED_GOP)); + put_bits(&s->pb, 1, 0); /* broken link */ + } +} + +static inline void encode_mb_skip_run(MpegEncContext *s, int run){ + while (run >= 33) { + put_bits(&s->pb, 11, 0x008); + run -= 33; + } + put_bits(&s->pb, ff_mpeg12_mbAddrIncrTable[run][1], + ff_mpeg12_mbAddrIncrTable[run][0]); +} + +static av_always_inline void put_qscale(MpegEncContext *s) +{ + if(s->q_scale_type){ + assert(s->qscale>=1 && s->qscale <=12); + put_bits(&s->pb, 5, inv_non_linear_qscale[s->qscale]); + }else{ + put_bits(&s->pb, 5, s->qscale); + } +} + +void ff_mpeg1_encode_slice_header(MpegEncContext *s){ + put_header(s, SLICE_MIN_START_CODE + s->mb_y); + put_qscale(s); + put_bits(&s->pb, 1, 0); /* slice extra information */ +} + +void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number) +{ + mpeg1_encode_sequence_header(s); + + /* mpeg1 picture header */ + put_header(s, PICTURE_START_CODE); + /* temporal reference */ + + // RAL: s->picture_number instead of s->fake_picture_number + put_bits(&s->pb, 10, (s->picture_number - + s->gop_picture_number) & 0x3ff); + put_bits(&s->pb, 3, s->pict_type); + + s->vbv_delay_ptr= s->pb.buf + put_bits_count(&s->pb)/8; + put_bits(&s->pb, 16, 0xFFFF); /* vbv_delay */ + + // 
RAL: Forward f_code also needed for B frames
+    if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) {
+        put_bits(&s->pb, 1, 0); /* half pel coordinates */
+        if(s->codec_id == CODEC_ID_MPEG1VIDEO)
+            put_bits(&s->pb, 3, s->f_code); /* forward_f_code */
+        else
+            put_bits(&s->pb, 3, 7); /* forward_f_code */
+    }
+
+    // RAL: Backward f_code necessary for B frames
+    if (s->pict_type == FF_B_TYPE) {
+        put_bits(&s->pb, 1, 0); /* half pel coordinates */
+        if(s->codec_id == CODEC_ID_MPEG1VIDEO)
+            put_bits(&s->pb, 3, s->b_code); /* backward_f_code */
+        else
+            put_bits(&s->pb, 3, 7); /* backward_f_code */
+    }
+
+    put_bits(&s->pb, 1, 0); /* extra bit picture */
+
+    s->frame_pred_frame_dct = 1;
+    if(s->codec_id == CODEC_ID_MPEG2VIDEO){
+        put_header(s, EXT_START_CODE);
+        put_bits(&s->pb, 4, 8); //pic ext
+        if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) {
+            put_bits(&s->pb, 4, s->f_code);
+            put_bits(&s->pb, 4, s->f_code);
+        }else{
+            put_bits(&s->pb, 8, 255);
+        }
+        if (s->pict_type == FF_B_TYPE) {
+            put_bits(&s->pb, 4, s->b_code);
+            put_bits(&s->pb, 4, s->b_code);
+        }else{
+            put_bits(&s->pb, 8, 255);
+        }
+        put_bits(&s->pb, 2, s->intra_dc_precision);
+
+        assert(s->picture_structure == PICT_FRAME);
+        put_bits(&s->pb, 2, s->picture_structure);
+        if (s->progressive_sequence) {
+            put_bits(&s->pb, 1, 0); /* no repeat */
+        } else {
+            put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first);
+        }
+        /* XXX: optimize the generation of this flag with entropy
+           measures */
+        s->frame_pred_frame_dct = s->progressive_sequence;
+
+        put_bits(&s->pb, 1, s->frame_pred_frame_dct);
+        put_bits(&s->pb, 1, s->concealment_motion_vectors);
+        put_bits(&s->pb, 1, s->q_scale_type);
+        put_bits(&s->pb, 1, s->intra_vlc_format);
+        put_bits(&s->pb, 1, s->alternate_scan);
+        put_bits(&s->pb, 1, s->repeat_first_field);
+        s->progressive_frame = s->progressive_sequence;
+        put_bits(&s->pb, 1, s->chroma_format == CHROMA_420 ? s->progressive_frame : 0); /* chroma_420_type */
+        put_bits(&s->pb, 1, s->progressive_frame);
+        put_bits(&s->pb, 1, 0); //composite_display_flag
+    }
+    if(s->flags & CODEC_FLAG_SVCD_SCAN_OFFSET){
+        int i;
+
+        put_header(s, USER_START_CODE);
+        for(i=0; i<sizeof(svcd_scan_offset_placeholder); i++){
+            put_bits(&s->pb, 8, svcd_scan_offset_placeholder[i]);
+        }
+    }
+
+    s->mb_y=0;
+    ff_mpeg1_encode_slice_header(s);
+}
+
+static inline void put_mb_modes(MpegEncContext *s, int n, int bits,
+                                int has_mv, int field_motion)
+{
+    put_bits(&s->pb, n, bits);
+    if (!s->frame_pred_frame_dct) {
+        if (has_mv)
+            put_bits(&s->pb, 2, 2 - field_motion); /* motion_type: frame/field */
+        put_bits(&s->pb, 1, s->interlaced_dct);
+    }
+}
+
+static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
+                                                      DCTELEM block[6][64],
+                                                      int motion_x, int motion_y,
+                                                      int mb_block_count)
+{
+    int i, cbp;
+    const int mb_x = s->mb_x;
+    const int mb_y = s->mb_y;
+    const int first_mb= mb_x == s->resync_mb_x && mb_y == s->resync_mb_y;
+
+    /* compute cbp */
+    cbp = 0;
+    for(i=0;i<mb_block_count;i++) {
+        if (s->block_last_index[i] >= 0)
+            cbp |= 1 << (mb_block_count - 1 - i);
+    }
+
+    if (cbp == 0 && !first_mb && s->mv_type == MV_TYPE_16X16 &&
+        (mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) &&
+        ((s->pict_type == FF_P_TYPE && (motion_x | motion_y) == 0) ||
+         (s->pict_type == FF_B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) |
+         ((s->mv_dir & MV_DIR_BACKWARD) ?
((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) { + s->mb_skip_run++; + s->qscale -= s->dquant; + s->skip_count++; + s->misc_bits++; + s->last_bits++; + if(s->pict_type == FF_P_TYPE){ + s->last_mv[0][1][0]= s->last_mv[0][0][0]= + s->last_mv[0][1][1]= s->last_mv[0][0][1]= 0; + } + } else { + if(first_mb){ + assert(s->mb_skip_run == 0); + encode_mb_skip_run(s, s->mb_x); + }else{ + encode_mb_skip_run(s, s->mb_skip_run); + } + + if (s->pict_type == FF_I_TYPE) { + if(s->dquant && cbp){ + put_mb_modes(s, 2, 1, 0, 0); /* macroblock_type : macroblock_quant = 1 */ + put_qscale(s); + }else{ + put_mb_modes(s, 1, 1, 0, 0); /* macroblock_type : macroblock_quant = 0 */ + s->qscale -= s->dquant; + } + s->misc_bits+= get_bits_diff(s); + s->i_count++; + } else if (s->mb_intra) { + if(s->dquant && cbp){ + put_mb_modes(s, 6, 0x01, 0, 0); + put_qscale(s); + }else{ + put_mb_modes(s, 5, 0x03, 0, 0); + s->qscale -= s->dquant; + } + s->misc_bits+= get_bits_diff(s); + s->i_count++; + memset(s->last_mv, 0, sizeof(s->last_mv)); + } else if (s->pict_type == FF_P_TYPE) { + if(s->mv_type == MV_TYPE_16X16){ + if (cbp != 0) { + if ((motion_x|motion_y) == 0) { + if(s->dquant){ + put_mb_modes(s, 5, 1, 0, 0); /* macroblock_pattern & quant */ + put_qscale(s); + }else{ + put_mb_modes(s, 2, 1, 0, 0); /* macroblock_pattern only */ + } + s->misc_bits+= get_bits_diff(s); + } else { + if(s->dquant){ + put_mb_modes(s, 5, 2, 1, 0); /* motion + cbp */ + put_qscale(s); + }else{ + put_mb_modes(s, 1, 1, 1, 0); /* motion + cbp */ + } + s->misc_bits+= get_bits_diff(s); + mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added + mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added + s->mv_bits+= get_bits_diff(s); + } + } else { + put_bits(&s->pb, 3, 1); /* motion only */ + if (!s->frame_pred_frame_dct) + put_bits(&s->pb, 2, 2); /* motion_type: frame */ + s->misc_bits+= get_bits_diff(s); + mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added + mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added + s->qscale -= s->dquant; + s->mv_bits+= get_bits_diff(s); + } + s->last_mv[0][1][0]= s->last_mv[0][0][0]= motion_x; + s->last_mv[0][1][1]= s->last_mv[0][0][1]= motion_y; + }else{ + assert(!s->frame_pred_frame_dct && s->mv_type == MV_TYPE_FIELD); + + if (cbp) { + if(s->dquant){ + put_mb_modes(s, 5, 2, 1, 1); /* motion + cbp */ + put_qscale(s); + }else{ + put_mb_modes(s, 1, 1, 1, 1); /* motion + cbp */ + } + } else { + put_bits(&s->pb, 3, 1); /* motion only */ + put_bits(&s->pb, 2, 1); /* motion_type: field */ + s->qscale -= s->dquant; + } + s->misc_bits+= get_bits_diff(s); + for(i=0; i<2; i++){ + put_bits(&s->pb, 1, s->field_select[0][i]); + mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code); + mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code); + s->last_mv[0][i][0]= s->mv[0][i][0]; + s->last_mv[0][i][1]= 2*s->mv[0][i][1]; + } + s->mv_bits+= get_bits_diff(s); + } + if(cbp) { + if (s->chroma_y_shift) { + put_bits(&s->pb, ff_mpeg12_mbPatTable[cbp][1], ff_mpeg12_mbPatTable[cbp][0]); + } else { + put_bits(&s->pb, ff_mpeg12_mbPatTable[cbp>>2][1], ff_mpeg12_mbPatTable[cbp>>2][0]); + put_sbits(&s->pb, 2, cbp); + } + } + s->f_count++; + } else{ + if(s->mv_type == MV_TYPE_16X16){ + if (cbp){ // With coded bloc pattern + if (s->dquant) { + if(s->mv_dir == MV_DIR_FORWARD) + put_mb_modes(s, 6, 
3, 1, 0);
+                    else
+                        put_mb_modes(s, 8-s->mv_dir, 2, 1, 0);
+                    put_qscale(s);
+                } else {
+                    put_mb_modes(s, 5-s->mv_dir, 3, 1, 0);
+                }
+            }else{ // No coded bloc pattern
+                put_bits(&s->pb, 5-s->mv_dir, 2);
+                if (!s->frame_pred_frame_dct)
+                    put_bits(&s->pb, 2, 2); /* motion_type: frame */
+                s->qscale -= s->dquant;
+            }
+            s->misc_bits += get_bits_diff(s);
+            if (s->mv_dir&MV_DIR_FORWARD){
+                mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code);
+                mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code);
+                s->last_mv[0][0][0]=s->last_mv[0][1][0]= s->mv[0][0][0];
+                s->last_mv[0][0][1]=s->last_mv[0][1][1]= s->mv[0][0][1];
+                s->f_count++;
+            }
+            if (s->mv_dir&MV_DIR_BACKWARD){
+                mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code);
+                mpeg1_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code);
+                s->last_mv[1][0][0]=s->last_mv[1][1][0]= s->mv[1][0][0];
+                s->last_mv[1][0][1]=s->last_mv[1][1][1]= s->mv[1][0][1];
+                s->b_count++;
+            }
+        }else{
+            assert(s->mv_type == MV_TYPE_FIELD);
+            assert(!s->frame_pred_frame_dct);
+            if (cbp){ // With coded bloc pattern
+                if (s->dquant) {
+                    if(s->mv_dir == MV_DIR_FORWARD)
+                        put_mb_modes(s, 6, 3, 1, 1);
+                    else
+                        put_mb_modes(s, 8-s->mv_dir, 2, 1, 1);
+                    put_qscale(s);
+                } else {
+                    put_mb_modes(s, 5-s->mv_dir, 3, 1, 1);
+                }
+            }else{ // No coded bloc pattern
+                put_bits(&s->pb, 5-s->mv_dir, 2);
+                put_bits(&s->pb, 2, 1); /* motion_type: field */
+                s->qscale -= s->dquant;
+            }
+            s->misc_bits += get_bits_diff(s);
+            if (s->mv_dir&MV_DIR_FORWARD){
+                for(i=0; i<2; i++){
+                    put_bits(&s->pb, 1, s->field_select[0][i]);
+                    mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code);
+                    mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code);
+                    s->last_mv[0][i][0]= s->mv[0][i][0];
+                    s->last_mv[0][i][1]= 2*s->mv[0][i][1];
+                }
+                s->f_count++;
+            }
+            if (s->mv_dir&MV_DIR_BACKWARD){
+                for(i=0; i<2; i++){
+                    put_bits(&s->pb, 1, s->field_select[1][i]);
+                    mpeg1_encode_motion(s, s->mv[1][i][0] - s->last_mv[1][i][0] , s->b_code);
+                    mpeg1_encode_motion(s, s->mv[1][i][1] - (s->last_mv[1][i][1]>>1), s->b_code);
+                    s->last_mv[1][i][0]= s->mv[1][i][0];
+                    s->last_mv[1][i][1]= 2*s->mv[1][i][1];
+                }
+                s->b_count++;
+            }
+        }
+        s->mv_bits += get_bits_diff(s);
+        if(cbp) {
+            if (s->chroma_y_shift) {
+                put_bits(&s->pb, ff_mpeg12_mbPatTable[cbp][1], ff_mpeg12_mbPatTable[cbp][0]);
+            } else {
+                put_bits(&s->pb, ff_mpeg12_mbPatTable[cbp>>2][1], ff_mpeg12_mbPatTable[cbp>>2][0]);
+                put_sbits(&s->pb, 2, cbp);
+            }
+        }
+        }
+        for(i=0;i<mb_block_count;i++) {
+            if (s->block_last_index[i] >= 0) {
+                mpeg1_encode_block(s, block[i], i);
+            }
+        }
+        s->mb_skip_run = 0;
+        if(s->mb_intra)
+            s->i_tex_bits+= get_bits_diff(s);
+        else
+            s->p_tex_bits+= get_bits_diff(s);
+    }
+}
+
+void mpeg1_encode_mb(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y)
+{
+    if (s->chroma_format == CHROMA_420) mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 6);
+    else mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 8);
+}
+
+// RAL: Parameter added: f_or_b_code
+static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code)
+{
+    int code, bit_size, l, bits, range, sign;
+
+    if (val == 0) {
+        /* zero vector */
+        code = 0;
+        put_bits(&s->pb,
+                 ff_mpeg12_mbMotionVectorTable[0][1],
+                 ff_mpeg12_mbMotionVectorTable[0][0]);
+    } else {
+        bit_size = f_or_b_code - 1;
+        range = 1 << bit_size;
+        /* modulo encoding */
+        l= INT_BIT - 5 - bit_size;
+        val= (val<<l)>>l;
+
+        if (val >= 0) {
+            val--;
+            code = (val >> bit_size) + 1;
+            bits = val & (range - 1);
+            sign = 0;
+        } else {
+            val = -val;
+            val--;
+            code = (val >> bit_size) + 1;
+            bits = val & (range - 1);
+            sign = 1;
+        }
+
+        assert(code > 0 && code <= 16);
+
+        put_bits(&s->pb,
+                 ff_mpeg12_mbMotionVectorTable[code][1],
+                 ff_mpeg12_mbMotionVectorTable[code][0]);
+
+        put_bits(&s->pb, 1, sign);
+        if (bit_size > 0) {
+            put_bits(&s->pb, bit_size, bits);
+        }
+    }
+}
+
+void ff_mpeg1_encode_init(MpegEncContext *s)
+{
+    static int done=0;
+
+    ff_mpeg12_common_init(s);
+
+    if(!done){
+        int f_code;
+        int mv;
+        int i;
+
+        done=1;
+        init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
+        init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);
+
+        for(i=0; i<64; i++)
+        {
+                mpeg1_max_level[0][i]= ff_rl_mpeg1.max_level[0][i];
+                mpeg1_index_run[0][i]= ff_rl_mpeg1.index_run[0][i];
+        }
+
+        init_uni_ac_vlc(&ff_rl_mpeg1, uni_mpeg1_ac_vlc_len);
+        if(s->intra_vlc_format)
+            init_uni_ac_vlc(&ff_rl_mpeg2, uni_mpeg2_ac_vlc_len);
+
+        /* build unified dc encoding tables */
+        for(i=-255; i<256; i++)
+        {
+                int adiff, index;
+                int bits, code;
+                int diff=i;
+
+                adiff = FFABS(diff);
+                if(diff<0) diff--;
+                index = av_log2(2*adiff);
+
+                bits= ff_mpeg12_vlc_dc_lum_bits[index] + index;
+                code= (ff_mpeg12_vlc_dc_lum_code[index]<<index) + (diff & ((1 << index) - 1));
+                mpeg1_lum_dc_uni[i+255]= bits + (code<<8);
+
+                bits= ff_mpeg12_vlc_dc_chroma_bits[index] + index;
+                code= (ff_mpeg12_vlc_dc_chroma_code[index]<<index) + (diff & ((1 << index) - 1));
+                mpeg1_chr_dc_uni[i+255]= bits + (code<<8);
+        }
+
+        for(f_code=1; f_code<=MAX_FCODE; f_code++){
+            for(mv=-MAX_MV; mv<=MAX_MV; mv++){
+                int len;
+
+                if(mv==0) len= ff_mpeg12_mbMotionVectorTable[0][1];
+                else{
+                    int val, bit_size, range, code;
+
+                    bit_size = f_code - 1;
+                    range = 1 << bit_size;
+
+                    val=mv;
+                    if (val < 0)
+                        val = -val;
+                    val--;
+                    code = (val >> bit_size) + 1;
+                    if(code<17){
+                        len= ff_mpeg12_mbMotionVectorTable[code][1] + 1 + bit_size;
+                    }else{
+                        len= ff_mpeg12_mbMotionVectorTable[16][1] + 2 + bit_size;
+                    }
+                }
+
+                mv_penalty[f_code][mv+MAX_MV]= len;
+            }
+        }
+
+
+        for(f_code=MAX_FCODE; f_code>0; f_code--){
+            for(mv=-(8<<f_code); mv<(8<<f_code); mv++){
+                fcode_tab[mv+MAX_MV]= f_code;
+            }
+        }
+    }
+    s->me.mv_penalty= mv_penalty;
+    s->fcode_tab= fcode_tab;
+    if(s->codec_id == CODEC_ID_MPEG1VIDEO){
+        s->min_qcoeff=-255;
+        s->max_qcoeff= 255;
+    }else{
+        s->min_qcoeff=-2047;
+        s->max_qcoeff= 2047;
+    }
+    if (s->intra_vlc_format) {
+        s->intra_ac_vlc_length=
+        s->intra_ac_vlc_last_length= uni_mpeg2_ac_vlc_len;
+    } else {
+        s->intra_ac_vlc_length=
+        s->intra_ac_vlc_last_length= uni_mpeg1_ac_vlc_len;
+    }
+    s->inter_ac_vlc_length=
+    s->inter_ac_vlc_last_length= uni_mpeg1_ac_vlc_len;
+}
+
+static inline void encode_dc(MpegEncContext *s, int diff, int component)
+{
+    if(((unsigned) (diff+255)) >= 511){
+        int index;
+
+        if(diff<0){
+            index= av_log2_16bit(-2*diff);
+            diff--;
+        }else{
+            index= av_log2_16bit(2*diff);
+        }
+        if (component == 0) {
+            put_bits(
+                &s->pb,
+                ff_mpeg12_vlc_dc_lum_bits[index] + index,
+                (ff_mpeg12_vlc_dc_lum_code[index]<<index) + (diff & ((1 << index) - 1)));
+        }else{
+            put_bits(
+                &s->pb,
+                ff_mpeg12_vlc_dc_chroma_bits[index] + index,
+                (ff_mpeg12_vlc_dc_chroma_code[index]<<index) + (diff & ((1 << index) - 1)));
+        }
+    }else{
+        if (component == 0) {
+            put_bits(
+                &s->pb,
+                mpeg1_lum_dc_uni[diff+255]&0xFF,
+                mpeg1_lum_dc_uni[diff+255]>>8);
+        } else {
+            put_bits(
+                &s->pb,
+                mpeg1_chr_dc_uni[diff+255]&0xFF,
+                mpeg1_chr_dc_uni[diff+255]>>8);
+        }
+    }
+}
+
+static void mpeg1_encode_block(MpegEncContext *s,
+                               DCTELEM *block,
+                               int n)
+{
+    int alevel, level, last_non_zero, dc, diff, i, j, run, last_index, sign;
+    int code, component;
+    const uint16_t (*table_vlc)[2] = ff_rl_mpeg1.table_vlc;
+
+    last_index = s->block_last_index[n];
+
+    /* DC coef */
+    if (s->mb_intra) {
+        component = (n <= 3 ?
0 : (n&1) + 1); + dc = block[0]; /* overflow is impossible */ + diff = dc - s->last_dc[component]; + encode_dc(s, diff, component); + s->last_dc[component] = dc; + i = 1; + if (s->intra_vlc_format) + table_vlc = ff_rl_mpeg2.table_vlc; + } else { + /* encode the first coefficient : needs to be done here because + it is handled slightly differently */ + level = block[0]; + if (abs(level) == 1) { + code = ((uint32_t)level >> 31); /* the sign bit */ + put_bits(&s->pb, 2, code | 0x02); + i = 1; + } else { + i = 0; + last_non_zero = -1; + goto next_coef; + } + } + + /* now quantify & encode AC coefs */ + last_non_zero = i - 1; + + for(;i<=last_index;i++) { + j = s->intra_scantable.permutated[i]; + level = block[j]; + next_coef: +#if 0 + if (level != 0) + dprintf(s->avctx, "level[%d]=%d\n", i, level); +#endif + /* encode using VLC */ + if (level != 0) { + run = i - last_non_zero - 1; + + alevel= level; + MASK_ABS(sign, alevel) + sign&=1; + + if (alevel <= mpeg1_max_level[0][run]){ + code= mpeg1_index_run[0][run] + alevel - 1; + /* store the vlc & sign at once */ + put_bits(&s->pb, table_vlc[code][1]+1, (table_vlc[code][0]<<1) + sign); + } else { + /* escape seems to be pretty rare <5% so I do not optimize it */ + put_bits(&s->pb, table_vlc[111][1], table_vlc[111][0]); + /* escape: only clip in this case */ + put_bits(&s->pb, 6, run); + if(s->codec_id == CODEC_ID_MPEG1VIDEO){ + if (alevel < 128) { + put_sbits(&s->pb, 8, level); + } else { + if (level < 0) { + put_bits(&s->pb, 16, 0x8001 + level + 255); + } else { + put_sbits(&s->pb, 16, level); + } + } + }else{ + put_sbits(&s->pb, 12, level); + } + } + last_non_zero = i; + } + } + /* end of block */ + put_bits(&s->pb, table_vlc[112][1], table_vlc[112][0]); +} + +AVCodec mpeg1video_encoder = { + "mpeg1video", + CODEC_TYPE_VIDEO, + CODEC_ID_MPEG1VIDEO, + sizeof(MpegEncContext), + encode_init, + MPV_encode_picture, + MPV_encode_end, + .supported_framerates= ff_frame_rate_tab+1, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .capabilities= CODEC_CAP_DELAY, + .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"), +}; + +AVCodec mpeg2video_encoder = { + "mpeg2video", + CODEC_TYPE_VIDEO, + CODEC_ID_MPEG2VIDEO, + sizeof(MpegEncContext), + encode_init, + MPV_encode_picture, + MPV_encode_end, + .supported_framerates= ff_frame_rate_tab+1, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE}, + .capabilities= CODEC_CAP_DELAY, + .long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"), +}; diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4audio.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4audio.c new file mode 100644 index 0000000000..ce5c723411 --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4audio.c @@ -0,0 +1,83 @@ +/* + * MPEG-4 Audio common code + * Copyright (c) 2008 Baptiste Coudurier + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "bitstream.h" +#include "mpeg4audio.h" + +const int ff_mpeg4audio_sample_rates[16] = { + 96000, 88200, 64000, 48000, 44100, 32000, + 24000, 22050, 16000, 12000, 11025, 8000, 7350 +}; + +const uint8_t ff_mpeg4audio_channels[8] = { + 0, 1, 2, 3, 4, 5, 6, 8 +}; + +static inline int get_object_type(GetBitContext *gb) +{ + int object_type = get_bits(gb, 5); + if (object_type == 31) + object_type = 32 + get_bits(gb, 6); + return object_type; +} + +static inline int get_sample_rate(GetBitContext *gb, int *index) +{ + *index = get_bits(gb, 4); + return *index == 0x0f ? get_bits(gb, 24) : + ff_mpeg4audio_sample_rates[*index]; +} + +int ff_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int buf_size) +{ + GetBitContext gb; + int specific_config_bitindex; + + init_get_bits(&gb, buf, buf_size*8); + c->object_type = get_object_type(&gb); + c->sample_rate = get_sample_rate(&gb, &c->sampling_index); + c->chan_config = get_bits(&gb, 4); + c->sbr = -1; + if (c->object_type == 5) { + c->ext_object_type = c->object_type; + c->sbr = 1; + c->ext_sample_rate = get_sample_rate(&gb, &c->ext_sampling_index); + c->object_type = get_object_type(&gb); + } else + c->ext_object_type = 0; + + specific_config_bitindex = get_bits_count(&gb); + + if (c->ext_object_type != 5) { + int bits_left = buf_size*8 - specific_config_bitindex; + for (; bits_left > 15; bits_left--) { + if (show_bits(&gb, 11) == 0x2b7) { // sync extension + get_bits(&gb, 11); + c->ext_object_type = get_object_type(&gb); + if (c->ext_object_type == 5 && (c->sbr = get_bits1(&gb)) == 1) + c->ext_sample_rate = get_sample_rate(&gb, &c->ext_sampling_index); + break; + } else + get_bits1(&gb); // skip 1 bit + } + } + return specific_config_bitindex; +} diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4audio.h b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4audio.h new file mode 100644 index 0000000000..06a1a366db --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4audio.h @@ -0,0 +1,49 @@ +/* + * MPEG-4 Audio common header + * Copyright (c) 2008 Baptiste Coudurier + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef FFMPEG_MPEG4AUDIO_H +#define FFMPEG_MPEG4AUDIO_H + +#include + +typedef struct { + int object_type; + int sampling_index; + int sample_rate; + int chan_config; + int sbr; //< -1 implicit, 1 presence + int ext_object_type; + int ext_sampling_index; + int ext_sample_rate; +} MPEG4AudioConfig; + +extern const int ff_mpeg4audio_sample_rates[16]; +extern const uint8_t ff_mpeg4audio_channels[8]; +/** + * Parse MPEG-4 systems extradata to retrieve audio configuration. + * @param[in] c MPEG4AudioConfig structure to fill. + * @param[in] buf Extradata from container. + * @param[in] buf_size Extradata size. + * @return On error -1 is returned, on success AudioSpecificConfig bit index in extradata. + */ +int ff_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int buf_size); + +#endif /* FFMPEG_MPEGAUDIO_H */ diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4data.h b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4data.h index 0092a9f5e0..ba8f9463cb 100644 --- a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4data.h +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4data.h @@ -1,8 +1,36 @@ +/* + * copyright (c) 2000,2001 Fabrice Bellard + * H263+ support + * copyright (c) 2002-2004 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + /** * @file mpeg4data.h * mpeg4 tables. */ +#ifndef FFMPEG_MPEG4DATA_H +#define FFMPEG_MPEG4DATA_H + +#include +#include "mpegvideo.h" + // shapes #define RECT_SHAPE 0 #define BIN_SHAPE 1 @@ -45,13 +73,13 @@ const uint8_t DCtab_lum[13][2] = { {3,3}, {3,2}, {2,2}, {2,3}, {1,3}, {1,4}, {1,5}, {1,6}, {1,7}, {1,8}, {1,9}, {1,10}, {1,11}, -}; +}; const uint8_t DCtab_chrom[13][2] = { {3,2}, {2,2}, {1,2}, {1,3}, {1,4}, {1,5}, {1,6}, {1,7}, {1,8}, {1,9}, {1,10}, {1,11}, {1,12}, -}; +}; const uint16_t intra_vlc[103][2] = { { 0x2, 2 }, @@ -123,7 +151,8 @@ static RLTable rl_intra = { intra_level, }; -static const uint16_t inter_rvlc[170][2]={ //note this is identical to the intra rvlc except that its reordered +/* Note this is identical to the intra rvlc except that it is reordered. 
*/ +static const uint16_t inter_rvlc[170][2]={ {0x0006, 3},{0x0001, 4},{0x0004, 5},{0x001C, 7}, {0x003C, 8},{0x003D, 8},{0x007C, 9},{0x00FC, 10}, {0x00FD, 10},{0x01FC, 11},{0x01FD, 11},{0x03FC, 12}, @@ -169,54 +198,54 @@ static const uint16_t inter_rvlc[170][2]={ //note this is identical to the intra {0x3F7C, 15},{0x3F7D, 15},{0x0000, 4} }; -static const uint8_t inter_rvlc_run[169]={ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 2, 2, 2, - 2, 2, 2, 2, 3, 3, 3, 3, - 3, 3, 3, 4, 4, 4, 4, 4, - 5, 5, 5, 5, 6, 6, 6, 6, - 7, 7, 7, 7, 8, 8, 8, 9, - 9, 9, 10, 10, 11, 11, 12, 12, -13, 13, 14, 14, 15, 15, 16, 16, -17, 17, 18, 19, 20, 21, 22, 23, -24, 25, 26, 27, 28, 29, 30, 31, -32, 33, 34, 35, 36, 37, 38, - 0, 0, 0, 0, 0, 1, 1, 1, - 1, 1, 2, 2, 2, 3, 3, 4, - 4, 5, 5, 6, 6, 7, 7, 8, - 8, 9, 9, 10, 10, 11, 11, 12, -12, 13, 13, 14, 15, 16, 17, 18, -19, 20, 21, 22, 23, 24, 25, 26, -27, 28, 29, 30, 31, 32, 33, 34, -35, 36, 37, 38, 39, 40, 41, 42, -43, 44, +static const int8_t inter_rvlc_run[169]={ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 2, 2, 2, + 2, 2, 2, 2, 3, 3, 3, 3, + 3, 3, 3, 4, 4, 4, 4, 4, + 5, 5, 5, 5, 6, 6, 6, 6, + 7, 7, 7, 7, 8, 8, 8, 9, + 9, 9, 10, 10, 11, 11, 12, 12, +13, 13, 14, 14, 15, 15, 16, 16, +17, 17, 18, 19, 20, 21, 22, 23, +24, 25, 26, 27, 28, 29, 30, 31, +32, 33, 34, 35, 36, 37, 38, + 0, 0, 0, 0, 0, 1, 1, 1, + 1, 1, 2, 2, 2, 3, 3, 4, + 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 10, 10, 11, 11, 12, +12, 13, 13, 14, 15, 16, 17, 18, +19, 20, 21, 22, 23, 24, 25, 26, +27, 28, 29, 30, 31, 32, 33, 34, +35, 36, 37, 38, 39, 40, 41, 42, +43, 44, }; -static const uint8_t inter_rvlc_level[169]={ - 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 16, -17, 18, 19, 1, 2, 3, 4, 5, - 6, 7, 8, 9, 10, 1, 2, 3, - 4, 5, 6, 7, 1, 2, 3, 4, - 5, 6, 7, 1, 2, 3, 4, 5, - 1, 2, 3, 4, 1, 2, 3, 4, - 1, 2, 3, 4, 1, 2, 3, 1, - 2, 3, 1, 2, 1, 2, 1, 2, - 1, 2, 1, 2, 1, 2, 1, 2, - 1, 2, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, - 1, 2, 3, 4, 5, 1, 2, 3, - 4, 5, 1, 2, 3, 1, 2, 1, - 2, 1, 2, 1, 2, 1, 2, 1, - 2, 1, 2, 1, 2, 1, 2, 1, - 2, 1, 2, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, +static const int8_t inter_rvlc_level[169]={ + 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, +17, 18, 19, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 1, 2, 3, + 4, 5, 6, 7, 1, 2, 3, 4, + 5, 6, 7, 1, 2, 3, 4, 5, + 1, 2, 3, 4, 1, 2, 3, 4, + 1, 2, 3, 4, 1, 2, 3, 1, + 2, 3, 1, 2, 1, 2, 1, 2, + 1, 2, 1, 2, 1, 2, 1, 2, + 1, 2, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, + 1, 2, 3, 4, 5, 1, 2, 3, + 4, 5, 1, 2, 3, 1, 2, 1, + 2, 1, 2, 1, 2, 1, 2, 1, + 2, 1, 2, 1, 2, 1, 2, 1, + 2, 1, 2, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, }; static RLTable rvlc_rl_inter = { @@ -273,54 +302,54 @@ static const uint16_t intra_rvlc[170][2]={ {0x3F7C, 15},{0x3F7D, 15},{0x0000, 4} }; -static const uint8_t intra_rvlc_run[169]={ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 4, 4, 4, 4, - 4, 4, 5, 5, 5, 5, 5, 5, - 6, 6, 6, 6, 6, 7, 7, 7, - 7, 7, 8, 8, 8, 8, 9, 9, - 9, 9, 10, 10, 11, 11, 12, 12, -13, 14, 15, 16, 17, 18, 19, - 0, 0, 0, 0, 0, 1, 1, 1, - 1, 1, 2, 2, 2, 3, 3, 4, - 4, 5, 5, 6, 6, 7, 7, 8, - 8, 9, 9, 10, 10, 11, 11, 12, -12, 13, 13, 14, 15, 16, 17, 
18, -19, 20, 21, 22, 23, 24, 25, 26, -27, 28, 29, 30, 31, 32, 33, 34, -35, 36, 37, 38, 39, 40, 41, 42, -43, 44, +static const int8_t intra_rvlc_run[169]={ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 4, 4, 4, 4, + 4, 4, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 7, 7, 7, + 7, 7, 8, 8, 8, 8, 9, 9, + 9, 9, 10, 10, 11, 11, 12, 12, +13, 14, 15, 16, 17, 18, 19, + 0, 0, 0, 0, 0, 1, 1, 1, + 1, 1, 2, 2, 2, 3, 3, 4, + 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 10, 10, 11, 11, 12, +12, 13, 13, 14, 15, 16, 17, 18, +19, 20, 21, 22, 23, 24, 25, 26, +27, 28, 29, 30, 31, 32, 33, 34, +35, 36, 37, 38, 39, 40, 41, 42, +43, 44, }; -static const uint8_t intra_rvlc_level[169]={ - 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 16, -17, 18, 19, 20, 21, 22, 23, 24, -25, 26, 27, 1, 2, 3, 4, 5, - 6, 7, 8, 9, 10, 11, 12, 13, - 1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 1, 2, 3, 4, 5, - 6, 7, 8, 9, 1, 2, 3, 4, - 5, 6, 1, 2, 3, 4, 5, 6, - 1, 2, 3, 4, 5, 1, 2, 3, - 4, 5, 1, 2, 3, 4, 1, 2, - 3, 4, 1, 2, 1, 2, 1, 2, - 1, 1, 1, 1, 1, 1, 1, - 1, 2, 3, 4, 5, 1, 2, 3, - 4, 5, 1, 2, 3, 1, 2, 1, - 2, 1, 2, 1, 2, 1, 2, 1, - 2, 1, 2, 1, 2, 1, 2, 1, - 2, 1, 2, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, +static const int8_t intra_rvlc_level[169]={ + 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, +17, 18, 19, 20, 21, 22, 23, 24, +25, 26, 27, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, + 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 1, 2, 3, 4, + 5, 6, 1, 2, 3, 4, 5, 6, + 1, 2, 3, 4, 5, 1, 2, 3, + 4, 5, 1, 2, 3, 4, 1, 2, + 3, 4, 1, 2, 1, 2, 1, 2, + 1, 1, 1, 1, 1, 1, 1, + 1, 2, 3, 4, 5, 1, 2, 3, + 4, 5, 1, 2, 3, 1, 2, 1, + 2, 1, 2, 1, 2, 1, 2, 1, + 2, 1, 2, 1, 2, 1, 2, 1, + 2, 1, 2, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, }; static RLTable rvlc_rl_intra = { @@ -333,7 +362,7 @@ static RLTable rvlc_rl_intra = { static const uint16_t sprite_trajectory_tab[15][2] = { {0x00, 2}, {0x02, 3}, {0x03, 3}, {0x04, 3}, {0x05, 3}, {0x06, 3}, - {0x0E, 4}, {0x1E, 5}, {0x3E, 6}, {0x7E, 7}, {0xFE, 8}, + {0x0E, 4}, {0x1E, 5}, {0x3E, 6}, {0x7E, 7}, {0xFE, 8}, {0x1FE, 9},{0x3FE, 10},{0x7FE, 11},{0xFFE, 12}, }; @@ -369,7 +398,7 @@ const int16_t ff_mpeg4_default_intra_matrix[64] = { 22, 23, 24, 26, 28, 30, 32, 35, 23, 24, 26, 28, 30, 32, 35, 38, 25, 26, 28, 30, 32, 35, 38, 41, - 27, 28, 30, 32, 35, 38, 41, 45, + 27, 28, 30, 32, 35, 38, 41, 45, }; const int16_t ff_mpeg4_default_non_intra_matrix[64] = { @@ -383,11 +412,11 @@ const int16_t ff_mpeg4_default_non_intra_matrix[64] = { 23, 24, 25, 27, 28, 30, 31, 33, }; -uint8_t ff_mpeg4_y_dc_scale_table[32]={ +const uint8_t ff_mpeg4_y_dc_scale_table[32]={ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 0, 8, 8, 8, 8,10,12,14,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,36,38,40,42,44,46 }; -uint8_t ff_mpeg4_c_dc_scale_table[32]={ +const uint8_t ff_mpeg4_c_dc_scale_table[32]={ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 0, 8, 8, 8, 8, 9, 9,10,10,11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,19,20,21,22,23,24,25 }; @@ -399,3 +428,5 @@ const uint16_t ff_mpeg4_resync_prefix[8]={ static const uint8_t mpeg4_dc_threshold[8]={ 99, 13, 15, 17, 19, 21, 23, 0 }; + +#endif /* FFMPEG_MPEG4DATA_H */ diff --git 
a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4video_parser.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4video_parser.c new file mode 100644 index 0000000000..9accc91265 --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4video_parser.c @@ -0,0 +1,138 @@ +/* + * MPEG4 Video frame extraction + * Copyright (c) 2003 Fabrice Bellard. + * Copyright (c) 2003 Michael Niedermayer. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "parser.h" +#include "mpegvideo.h" +#include "mpeg4video_parser.h" + + +int ff_mpeg4_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size){ + int vop_found, i; + uint32_t state; + + vop_found= pc->frame_start_found; + state= pc->state; + + i=0; + if(!vop_found){ + for(i=0; iframe_start_found=0; + pc->state=-1; + return i-3; + } + } + } + pc->frame_start_found= vop_found; + pc->state= state; + return END_NOT_FOUND; +} + +/* XXX: make it use less memory */ +static int av_mpeg4_decode_header(AVCodecParserContext *s1, + AVCodecContext *avctx, + const uint8_t *buf, int buf_size) +{ + ParseContext1 *pc = s1->priv_data; + MpegEncContext *s = pc->enc; + GetBitContext gb1, *gb = &gb1; + int ret; + + s->avctx = avctx; + s->current_picture_ptr = &s->current_picture; + + if (avctx->extradata_size && pc->first_picture){ + init_get_bits(gb, avctx->extradata, avctx->extradata_size*8); + ret = ff_mpeg4_decode_picture_header(s, gb); + } + + init_get_bits(gb, buf, 8 * buf_size); + ret = ff_mpeg4_decode_picture_header(s, gb); + if (s->width) { + avcodec_set_dimensions(avctx, s->width, s->height); + } + s1->pict_type= s->pict_type; + pc->first_picture = 0; + return ret; +} + +static int mpeg4video_parse_init(AVCodecParserContext *s) +{ + ParseContext1 *pc = s->priv_data; + + pc->enc = av_mallocz(sizeof(MpegEncContext)); + if (!pc->enc) + return -1; + pc->first_picture = 1; + return 0; +} + +static int mpeg4video_parse(AVCodecParserContext *s, + AVCodecContext *avctx, + const uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size) +{ + ParseContext *pc = s->priv_data; + int next; + + if(s->flags & PARSER_FLAG_COMPLETE_FRAMES){ + next= buf_size; + }else{ + next= ff_mpeg4_find_frame_end(pc, buf, buf_size); + + if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) { + *poutbuf = NULL; + *poutbuf_size = 0; + return buf_size; + } + } + av_mpeg4_decode_header(s, avctx, buf, buf_size); + + *poutbuf = buf; + *poutbuf_size = buf_size; + return next; +} + + +AVCodecParser mpeg4video_parser = { + { CODEC_ID_MPEG4 }, + sizeof(ParseContext1), + mpeg4video_parse_init, + mpeg4video_parse, + ff_parse1_close, + ff_mpeg4video_split, +}; diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4video_parser.h b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4video_parser.h new file mode 100644 index 
0000000000..125f6aa501 --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpeg4video_parser.h @@ -0,0 +1,34 @@ +/* + * MPEG4 video parser prototypes + * Copyright (c) 2003 Fabrice Bellard. + * Copyright (c) 2003 Michael Niedermayer. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef FFMPEG_MPEG4VIDEO_PARSER_H +#define FFMPEG_MPEG4VIDEO_PARSER_H + +#include "parser.h" + +/** + * finds the end of the current frame in the bitstream. + * @return the position of the first byte of the next frame, or -1 + */ +int ff_mpeg4_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size); + +#endif /* FFMPEG_MPEG4VIDEO_PARSER_H */ diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudio.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudio.c index fe79baa1d2..663427a43b 100644 --- a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudio.c +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudio.c @@ -1,793 +1,50 @@ /* - * The simplest mpeg audio layer 2 encoder - * Copyright (c) 2000, 2001 Fabrice Bellard. + * MPEG Audio common code + * Copyright (c) 2001, 2002 Fabrice Bellard. * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ - + /** * @file mpegaudio.c - * The simplest mpeg audio layer 2 encoder. + * MPEG Audio common code. 
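The ff_mpeg4_find_frame_end() routine added in mpeg4video_parser.c above walks the input with a rolling 32-bit state so that start codes split across buffer boundaries are still detected. Below is a minimal standalone sketch of that scanning technique; the function name, the test buffer and the simplified return convention are illustrative only and not part of libavcodec.

/* Sketch: locate an MPEG-4 VOP start code (0x000001B6) with a rolling
 * 32-bit state, the same technique ff_mpeg4_find_frame_end() uses above.
 * Names and the return convention here are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define VOP_STARTCODE 0x000001B6

/* Returns the offset of the first byte after the next VOP start code,
 * or -1 if none is found in buf[0..buf_size). */
static int find_next_vop(const uint8_t *buf, int buf_size)
{
    uint32_t state = ~0u;                /* anything without a 0x000001 prefix */
    int i;

    for (i = 0; i < buf_size; i++) {
        state = (state << 8) | buf[i];   /* slide the 4-byte window */
        if (state == VOP_STARTCODE)
            return i + 1;                /* first byte of the VOP payload */
    }
    return -1;
}

int main(void)
{
    /* 0x000001B6 embedded after two padding bytes */
    const uint8_t bs[] = { 0xFF, 0xFF, 0x00, 0x00, 0x01, 0xB6, 0x42 };
    printf("payload starts at %d\n", find_next_vop(bs, sizeof(bs)));  /* prints 6 */
    return 0;
}

Because the state survives between calls in the real parser (it is saved in pc->state), a start code that straddles two input buffers is found without any re-scanning.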
*/ - -#include "avcodec.h" + #include "mpegaudio.h" -/* currently, cannot change these constants (need to modify - quantization stage) */ -#define FRAC_BITS 15 -#define WFRAC_BITS 14 -#define MUL(a,b) (((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS) -#define FIX(a) ((int)((a) * (1 << FRAC_BITS))) -#define SAMPLES_BUF_SIZE 4096 - -typedef struct MpegAudioContext { - PutBitContext pb; - int nb_channels; - int freq, bit_rate; - int lsf; /* 1 if mpeg2 low bitrate selected */ - int bitrate_index; /* bit rate */ - int freq_index; - int frame_size; /* frame size, in bits, without padding */ - int64_t nb_samples; /* total number of samples encoded */ - /* padding computation */ - int frame_frac, frame_frac_incr, do_padding; - short samples_buf[MPA_MAX_CHANNELS][SAMPLES_BUF_SIZE]; /* buffer for filter */ - int samples_offset[MPA_MAX_CHANNELS]; /* offset in samples_buf */ - int sb_samples[MPA_MAX_CHANNELS][3][12][SBLIMIT]; - unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3]; /* scale factors */ - /* code to group 3 scale factors */ - unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT]; - int sblimit; /* number of used subbands */ - const unsigned char *alloc_table; -} MpegAudioContext; - -/* define it to use floats in quantization (I don't like floats !) */ -//#define USE_FLOATS - -#include "mpegaudiotab.h" - -static int MPA_encode_init(AVCodecContext *avctx) +/* bitrate is in kb/s */ +int ff_mpa_l2_select_table(int bitrate, int nb_channels, int freq, int lsf) { - MpegAudioContext *s = avctx->priv_data; - int freq = avctx->sample_rate; - int bitrate = avctx->bit_rate; - int channels = avctx->channels; - int i, v, table; - float a; + int ch_bitrate, table; - if (channels > 2) - return -1; - bitrate = bitrate / 1000; - s->nb_channels = channels; - s->freq = freq; - s->bit_rate = bitrate * 1000; - avctx->frame_size = MPA_FRAME_SIZE; - - /* encoding freq */ - s->lsf = 0; - for(i=0;i<3;i++) { - if (mpa_freq_tab[i] == freq) - break; - if ((mpa_freq_tab[i] / 2) == freq) { - s->lsf = 1; - break; - } - } - if (i == 3) - return -1; - s->freq_index = i; - - /* encoding bitrate & frequency */ - for(i=0;i<15;i++) { - if (mpa_bitrate_tab[s->lsf][1][i] == bitrate) - break; - } - if (i == 15) - return -1; - s->bitrate_index = i; - - /* compute total header size & pad bit */ - - a = (float)(bitrate * 1000 * MPA_FRAME_SIZE) / (freq * 8.0); - s->frame_size = ((int)a) * 8; - - /* frame fractional size to compute padding */ - s->frame_frac = 0; - s->frame_frac_incr = (int)((a - floor(a)) * 65536.0); - - /* select the right allocation table */ - table = l2_select_table(bitrate, s->nb_channels, freq, s->lsf); - - /* number of used subbands */ - s->sblimit = sblimit_table[table]; - s->alloc_table = alloc_tables[table]; - -#ifdef DEBUG - av_log(avctx, AV_LOG_DEBUG, "%d kb/s, %d Hz, frame_size=%d bits, table=%d, padincr=%x\n", - bitrate, freq, s->frame_size, table, s->frame_frac_incr); -#endif - - for(i=0;inb_channels;i++) - s->samples_offset[i] = 0; - - for(i=0;i<257;i++) { - int v; - v = mpa_enwindow[i]; -#if WFRAC_BITS != 16 - v = (v + (1 << (16 - WFRAC_BITS - 1))) >> (16 - WFRAC_BITS); -#endif - filter_bank[i] = v; - if ((i & 63) != 0) - v = -v; - if (i != 0) - filter_bank[512 - i] = v; - } - - for(i=0;i<64;i++) { - v = (int)(pow(2.0, (3 - i) / 3.0) * (1 << 20)); - if (v <= 0) - v = 1; - scale_factor_table[i] = v; -#ifdef USE_FLOATS - scale_factor_inv_table[i] = pow(2.0, -(3 - i) / 3.0) / (float)(1 << 20); -#else -#define P 15 - scale_factor_shift[i] = 21 - P - (i / 3); - scale_factor_mult[i] = (1 << P) * pow(2.0, (i 
% 3) / 3.0); -#endif - } - for(i=0;i<128;i++) { - v = i - 64; - if (v <= -3) - v = 0; - else if (v < 0) - v = 1; - else if (v == 0) - v = 2; - else if (v < 3) - v = 3; - else - v = 4; - scale_diff_table[i] = v; - } - - for(i=0;i<17;i++) { - v = quant_bits[i]; - if (v < 0) - v = -v; + ch_bitrate = bitrate / nb_channels; + if (!lsf) { + if ((freq == 48000 && ch_bitrate >= 56) || + (ch_bitrate >= 56 && ch_bitrate <= 80)) + table = 0; + else if (freq != 48000 && ch_bitrate >= 96) + table = 1; + else if (freq != 32000 && ch_bitrate <= 48) + table = 2; else - v = v * 3; - total_quant_bits[i] = 12 * v; - } - - avctx->coded_frame= avcodec_alloc_frame(); - avctx->coded_frame->key_frame= 1; - - return 0; -} - -/* 32 point floating point IDCT without 1/sqrt(2) coef zero scaling */ -static void idct32(int *out, int *tab) -{ - int i, j; - int *t, *t1, xr; - const int *xp = costab32; - - for(j=31;j>=3;j-=2) tab[j] += tab[j - 2]; - - t = tab + 30; - t1 = tab + 2; - do { - t[0] += t[-4]; - t[1] += t[1 - 4]; - t -= 4; - } while (t != t1); - - t = tab + 28; - t1 = tab + 4; - do { - t[0] += t[-8]; - t[1] += t[1-8]; - t[2] += t[2-8]; - t[3] += t[3-8]; - t -= 8; - } while (t != t1); - - t = tab; - t1 = tab + 32; - do { - t[ 3] = -t[ 3]; - t[ 6] = -t[ 6]; - - t[11] = -t[11]; - t[12] = -t[12]; - t[13] = -t[13]; - t[15] = -t[15]; - t += 16; - } while (t != t1); - - - t = tab; - t1 = tab + 8; - do { - int x1, x2, x3, x4; - - x3 = MUL(t[16], FIX(SQRT2*0.5)); - x4 = t[0] - x3; - x3 = t[0] + x3; - - x2 = MUL(-(t[24] + t[8]), FIX(SQRT2*0.5)); - x1 = MUL((t[8] - x2), xp[0]); - x2 = MUL((t[8] + x2), xp[1]); - - t[ 0] = x3 + x1; - t[ 8] = x4 - x2; - t[16] = x4 + x2; - t[24] = x3 - x1; - t++; - } while (t != t1); - - xp += 2; - t = tab; - t1 = tab + 4; - do { - xr = MUL(t[28],xp[0]); - t[28] = (t[0] - xr); - t[0] = (t[0] + xr); - - xr = MUL(t[4],xp[1]); - t[ 4] = (t[24] - xr); - t[24] = (t[24] + xr); - - xr = MUL(t[20],xp[2]); - t[20] = (t[8] - xr); - t[ 8] = (t[8] + xr); - - xr = MUL(t[12],xp[3]); - t[12] = (t[16] - xr); - t[16] = (t[16] + xr); - t++; - } while (t != t1); - xp += 4; - - for (i = 0; i < 4; i++) { - xr = MUL(tab[30-i*4],xp[0]); - tab[30-i*4] = (tab[i*4] - xr); - tab[ i*4] = (tab[i*4] + xr); - - xr = MUL(tab[ 2+i*4],xp[1]); - tab[ 2+i*4] = (tab[28-i*4] - xr); - tab[28-i*4] = (tab[28-i*4] + xr); - - xr = MUL(tab[31-i*4],xp[0]); - tab[31-i*4] = (tab[1+i*4] - xr); - tab[ 1+i*4] = (tab[1+i*4] + xr); - - xr = MUL(tab[ 3+i*4],xp[1]); - tab[ 3+i*4] = (tab[29-i*4] - xr); - tab[29-i*4] = (tab[29-i*4] + xr); - - xp += 2; - } - - t = tab + 30; - t1 = tab + 1; - do { - xr = MUL(t1[0], *xp); - t1[0] = (t[0] - xr); - t[0] = (t[0] + xr); - t -= 2; - t1 += 2; - xp++; - } while (t >= tab); - - for(i=0;i<32;i++) { - out[i] = tab[bitinv32[i]]; - } -} - -#define WSHIFT (WFRAC_BITS + 15 - FRAC_BITS) - -static void filter(MpegAudioContext *s, int ch, short *samples, int incr) -{ - short *p, *q; - int sum, offset, i, j; - int tmp[64]; - int tmp1[32]; - int *out; - - // print_pow1(samples, 1152); - - offset = s->samples_offset[ch]; - out = &s->sb_samples[ch][0][0][0]; - for(j=0;j<36;j++) { - /* 32 samples at once */ - for(i=0;i<32;i++) { - s->samples_buf[ch][offset + (31 - i)] = samples[0]; - samples += incr; - } - - /* filter */ - p = s->samples_buf[ch] + offset; - q = filter_bank; - /* maxsum = 23169 */ - for(i=0;i<64;i++) { - sum = p[0*64] * q[0*64]; - sum += p[1*64] * q[1*64]; - sum += p[2*64] * q[2*64]; - sum += p[3*64] * q[3*64]; - sum += p[4*64] * q[4*64]; - sum += p[5*64] * q[5*64]; - sum += p[6*64] * q[6*64]; - sum += 
p[7*64] * q[7*64]; - tmp[i] = sum; - p++; - q++; - } - tmp1[0] = tmp[16] >> WSHIFT; - for( i=1; i<=16; i++ ) tmp1[i] = (tmp[i+16]+tmp[16-i]) >> WSHIFT; - for( i=17; i<=31; i++ ) tmp1[i] = (tmp[i+16]-tmp[80-i]) >> WSHIFT; - - idct32(out, tmp1); - - /* advance of 32 samples */ - offset -= 32; - out += 32; - /* handle the wrap around */ - if (offset < 0) { - memmove(s->samples_buf[ch] + SAMPLES_BUF_SIZE - (512 - 32), - s->samples_buf[ch], (512 - 32) * 2); - offset = SAMPLES_BUF_SIZE - 512; - } - } - s->samples_offset[ch] = offset; - - // print_pow(s->sb_samples, 1152); -} - -static void compute_scale_factors(unsigned char scale_code[SBLIMIT], - unsigned char scale_factors[SBLIMIT][3], - int sb_samples[3][12][SBLIMIT], - int sblimit) -{ - int *p, vmax, v, n, i, j, k, code; - int index, d1, d2; - unsigned char *sf = &scale_factors[0][0]; - - for(j=0;j vmax) - vmax = v; - } - /* compute the scale factor index using log 2 computations */ - if (vmax > 0) { - n = av_log2(vmax); - /* n is the position of the MSB of vmax. now - use at most 2 compares to find the index */ - index = (21 - n) * 3 - 3; - if (index >= 0) { - while (vmax <= scale_factor_table[index+1]) - index++; - } else { - index = 0; /* very unlikely case of overflow */ - } - } else { - index = 62; /* value 63 is not allowed */ - } - -#if 0 - printf("%2d:%d in=%x %x %d\n", - j, i, vmax, scale_factor_table[index], index); -#endif - /* store the scale factor */ - assert(index >=0 && index <= 63); - sf[i] = index; - } - - /* compute the transmission factor : look if the scale factors - are close enough to each other */ - d1 = scale_diff_table[sf[0] - sf[1] + 64]; - d2 = scale_diff_table[sf[1] - sf[2] + 64]; - - /* handle the 25 cases */ - switch(d1 * 5 + d2) { - case 0*5+0: - case 0*5+4: - case 3*5+4: - case 4*5+0: - case 4*5+4: - code = 0; - break; - case 0*5+1: - case 0*5+2: - case 4*5+1: - case 4*5+2: - code = 3; - sf[2] = sf[1]; - break; - case 0*5+3: - case 4*5+3: - code = 3; - sf[1] = sf[2]; - break; - case 1*5+0: - case 1*5+4: - case 2*5+4: - code = 1; - sf[1] = sf[0]; - break; - case 1*5+1: - case 1*5+2: - case 2*5+0: - case 2*5+1: - case 2*5+2: - code = 2; - sf[1] = sf[2] = sf[0]; - break; - case 2*5+3: - case 3*5+3: - code = 2; - sf[0] = sf[1] = sf[2]; - break; - case 3*5+0: - case 3*5+1: - case 3*5+2: - code = 2; - sf[0] = sf[2] = sf[1]; - break; - case 1*5+3: - code = 2; - if (sf[0] > sf[2]) - sf[0] = sf[2]; - sf[1] = sf[2] = sf[0]; - break; - default: - av_abort(); - } - -#if 0 - printf("%d: %2d %2d %2d %d %d -> %d\n", j, - sf[0], sf[1], sf[2], d1, d2, code); -#endif - scale_code[j] = code; - sf += 3; - } -} - -/* The most important function : psycho acoustic module. In this - encoder there is basically none, so this is the worst you can do, - but also this is the simpler. */ -static void psycho_acoustic_model(MpegAudioContext *s, short smr[SBLIMIT]) -{ - int i; - - for(i=0;isblimit;i++) { - smr[i] = (int)(fixed_smr[i] * 10); - } -} - - -#define SB_NOTALLOCATED 0 -#define SB_ALLOCATED 1 -#define SB_NOMORE 2 - -/* Try to maximize the smr while using a number of bits inferior to - the frame size. 
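The doc comment that ends here describes the strategy of the compute_bit_allocation() routine that follows: keep handing one more quantization step to whichever subband still has the highest signal-to-mask ratio until the frame budget is spent. A distilled, self-contained sketch of that greedy loop is below; the names and the flat per-step cost model are illustrative, not the actual encoder structures.

/* Sketch of the greedy allocation idea used by compute_bit_allocation()
 * below: always refine the subband with the worst remaining masking.
 * Assumes step_cost[i] >= 1 so the loop terminates. Illustrative only. */
#include <limits.h>

#define NB_SUBBANDS 27

void greedy_allocate(int smr[NB_SUBBANDS],            /* signal-to-mask ratios */
                     const int step_cost[NB_SUBBANDS],/* bits per extra step   */
                     const int step_gain[NB_SUBBANDS],/* SNR gained per step   */
                     int bits_left,
                     int alloc[NB_SUBBANDS])          /* resulting step counts */
{
    for (;;) {
        int best = -1, best_smr = INT_MIN, i;
        for (i = 0; i < NB_SUBBANDS; i++)
            if (smr[i] > best_smr && step_cost[i] <= bits_left) {
                best_smr = smr[i];
                best = i;
            }
        if (best < 0)
            break;                        /* nothing affordable is left */
        alloc[best]++;
        bits_left -= step_cost[best];
        smr[best] -= step_gain[best];     /* that band is now better masked */
    }
}

The real routine additionally charges the scale-factor side information the first time a band is switched on, and freezes a band once it reaches the top of its allocation table.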
I tried to make the code simpler, faster and - smaller than other encoders :-) */ -static void compute_bit_allocation(MpegAudioContext *s, - short smr1[MPA_MAX_CHANNELS][SBLIMIT], - unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT], - int *padding) -{ - int i, ch, b, max_smr, max_ch, max_sb, current_frame_size, max_frame_size; - int incr; - short smr[MPA_MAX_CHANNELS][SBLIMIT]; - unsigned char subband_status[MPA_MAX_CHANNELS][SBLIMIT]; - const unsigned char *alloc; - - memcpy(smr, smr1, s->nb_channels * sizeof(short) * SBLIMIT); - memset(subband_status, SB_NOTALLOCATED, s->nb_channels * SBLIMIT); - memset(bit_alloc, 0, s->nb_channels * SBLIMIT); - - /* compute frame size and padding */ - max_frame_size = s->frame_size; - s->frame_frac += s->frame_frac_incr; - if (s->frame_frac >= 65536) { - s->frame_frac -= 65536; - s->do_padding = 1; - max_frame_size += 8; + table = 3; } else { - s->do_padding = 0; + table = 4; } - - /* compute the header + bit alloc size */ - current_frame_size = 32; - alloc = s->alloc_table; - for(i=0;isblimit;i++) { - incr = alloc[0]; - current_frame_size += incr * s->nb_channels; - alloc += 1 << incr; - } - for(;;) { - /* look for the subband with the largest signal to mask ratio */ - max_sb = -1; - max_ch = -1; - max_smr = 0x80000000; - for(ch=0;chnb_channels;ch++) { - for(i=0;isblimit;i++) { - if (smr[ch][i] > max_smr && subband_status[ch][i] != SB_NOMORE) { - max_smr = smr[ch][i]; - max_sb = i; - max_ch = ch; - } - } - } -#if 0 - printf("current=%d max=%d max_sb=%d alloc=%d\n", - current_frame_size, max_frame_size, max_sb, - bit_alloc[max_sb]); -#endif - if (max_sb < 0) - break; - - /* find alloc table entry (XXX: not optimal, should use - pointer table) */ - alloc = s->alloc_table; - for(i=0;iscale_code[max_ch][max_sb]] * 6; - incr += total_quant_bits[alloc[1]]; - } else { - /* increments bit allocation */ - b = bit_alloc[max_ch][max_sb]; - incr = total_quant_bits[alloc[b + 1]] - - total_quant_bits[alloc[b]]; - } - - if (current_frame_size + incr <= max_frame_size) { - /* can increase size */ - b = ++bit_alloc[max_ch][max_sb]; - current_frame_size += incr; - /* decrease smr by the resolution we added */ - smr[max_ch][max_sb] = smr1[max_ch][max_sb] - quant_snr[alloc[b]]; - /* max allocation size reached ? */ - if (b == ((1 << alloc[0]) - 1)) - subband_status[max_ch][max_sb] = SB_NOMORE; - else - subband_status[max_ch][max_sb] = SB_ALLOCATED; - } else { - /* cannot increase the size of this subband */ - subband_status[max_ch][max_sb] = SB_NOMORE; - } - } - *padding = max_frame_size - current_frame_size; - assert(*padding >= 0); - -#if 0 - for(i=0;isblimit;i++) { - printf("%d ", bit_alloc[i]); - } - printf("\n"); -#endif + return table; } - -/* - * Output the mpeg audio layer 2 frame. Note how the code is small - * compared to other encoders :-) - */ -static void encode_frame(MpegAudioContext *s, - unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT], - int padding) -{ - int i, j, k, l, bit_alloc_bits, b, ch; - unsigned char *sf; - int q[3]; - PutBitContext *p = &s->pb; - - /* header */ - - put_bits(p, 12, 0xfff); - put_bits(p, 1, 1 - s->lsf); /* 1 = mpeg1 ID, 0 = mpeg2 lsf ID */ - put_bits(p, 2, 4-2); /* layer 2 */ - put_bits(p, 1, 1); /* no error protection */ - put_bits(p, 4, s->bitrate_index); - put_bits(p, 2, s->freq_index); - put_bits(p, 1, s->do_padding); /* use padding */ - put_bits(p, 1, 0); /* private_bit */ - put_bits(p, 2, s->nb_channels == 2 ? 
MPA_STEREO : MPA_MONO); - put_bits(p, 2, 0); /* mode_ext */ - put_bits(p, 1, 0); /* no copyright */ - put_bits(p, 1, 1); /* original */ - put_bits(p, 2, 0); /* no emphasis */ - - /* bit allocation */ - j = 0; - for(i=0;isblimit;i++) { - bit_alloc_bits = s->alloc_table[j]; - for(ch=0;chnb_channels;ch++) { - put_bits(p, bit_alloc_bits, bit_alloc[ch][i]); - } - j += 1 << bit_alloc_bits; - } - - /* scale codes */ - for(i=0;isblimit;i++) { - for(ch=0;chnb_channels;ch++) { - if (bit_alloc[ch][i]) - put_bits(p, 2, s->scale_code[ch][i]); - } - } - - /* scale factors */ - for(i=0;isblimit;i++) { - for(ch=0;chnb_channels;ch++) { - if (bit_alloc[ch][i]) { - sf = &s->scale_factors[ch][i][0]; - switch(s->scale_code[ch][i]) { - case 0: - put_bits(p, 6, sf[0]); - put_bits(p, 6, sf[1]); - put_bits(p, 6, sf[2]); - break; - case 3: - case 1: - put_bits(p, 6, sf[0]); - put_bits(p, 6, sf[2]); - break; - case 2: - put_bits(p, 6, sf[0]); - break; - } - } - } - } - - /* quantization & write sub band samples */ - - for(k=0;k<3;k++) { - for(l=0;l<12;l+=3) { - j = 0; - for(i=0;isblimit;i++) { - bit_alloc_bits = s->alloc_table[j]; - for(ch=0;chnb_channels;ch++) { - b = bit_alloc[ch][i]; - if (b) { - int qindex, steps, m, sample, bits; - /* we encode 3 sub band samples of the same sub band at a time */ - qindex = s->alloc_table[j+b]; - steps = quant_steps[qindex]; - for(m=0;m<3;m++) { - sample = s->sb_samples[ch][k][l + m][i]; - /* divide by scale factor */ -#ifdef USE_FLOATS - { - float a; - a = (float)sample * scale_factor_inv_table[s->scale_factors[ch][i][k]]; - q[m] = (int)((a + 1.0) * steps * 0.5); - } -#else - { - int q1, e, shift, mult; - e = s->scale_factors[ch][i][k]; - shift = scale_factor_shift[e]; - mult = scale_factor_mult[e]; - - /* normalize to P bits */ - if (shift < 0) - q1 = sample << (-shift); - else - q1 = sample >> shift; - q1 = (q1 * mult) >> P; - q[m] = ((q1 + (1 << P)) * steps) >> (P + 1); - } -#endif - if (q[m] >= steps) - q[m] = steps - 1; - assert(q[m] >= 0 && q[m] < steps); - } - bits = quant_bits[qindex]; - if (bits < 0) { - /* group the 3 values to save bits */ - put_bits(p, -bits, - q[0] + steps * (q[1] + steps * q[2])); -#if 0 - printf("%d: gr1 %d\n", - i, q[0] + steps * (q[1] + steps * q[2])); -#endif - } else { -#if 0 - printf("%d: gr3 %d %d %d\n", - i, q[0], q[1], q[2]); -#endif - put_bits(p, bits, q[0]); - put_bits(p, bits, q[1]); - put_bits(p, bits, q[2]); - } - } - } - /* next subband in alloc table */ - j += 1 << bit_alloc_bits; - } - } - } - - /* padding */ - for(i=0;ipriv_data; - short *samples = data; - short smr[MPA_MAX_CHANNELS][SBLIMIT]; - unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT]; - int padding, i; - - for(i=0;inb_channels;i++) { - filter(s, i, samples + i, s->nb_channels); - } - - for(i=0;inb_channels;i++) { - compute_scale_factors(s->scale_code[i], s->scale_factors[i], - s->sb_samples[i], s->sblimit); - } - for(i=0;inb_channels;i++) { - psycho_acoustic_model(s, smr[i]); - } - compute_bit_allocation(s, smr, bit_alloc, &padding); - - init_put_bits(&s->pb, frame, MPA_MAX_CODED_FRAME_SIZE); - - encode_frame(s, bit_alloc, padding); - - s->nb_samples += MPA_FRAME_SIZE; - return pbBufPtr(&s->pb) - s->pb.buf; -} - -static int MPA_encode_close(AVCodecContext *avctx) -{ - av_freep(&avctx->coded_frame); - return 0; -} - -AVCodec mp2_encoder = { - "mp2", - CODEC_TYPE_AUDIO, - CODEC_ID_MP2, - sizeof(MpegAudioContext), - MPA_encode_init, - MPA_encode_frame, - MPA_encode_close, - NULL, -}; - -#undef FIX diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudio.h 
b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudio.h index e50e8bd6f6..6d602a1dcb 100644 --- a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudio.h +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudio.h @@ -1,10 +1,37 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + /** * @file mpegaudio.h * mpeg audio declarations for both encoder and decoder. */ +#ifndef FFMPEG_MPEGAUDIO_H +#define FFMPEG_MPEGAUDIO_H + +#include "avcodec.h" +#include "bitstream.h" +#include "dsputil.h" + /* max frame size, in samples */ -#define MPA_FRAME_SIZE 1152 +#define MPA_FRAME_SIZE 1152 /* max compressed frame size */ #define MPA_MAX_CODED_FRAME_SIZE 1792 @@ -18,14 +45,111 @@ #define MPA_DUAL 2 #define MPA_MONO 3 -int l2_select_table(int bitrate, int nb_channels, int freq, int lsf); -int mpa_decode_header(AVCodecContext *avctx, uint32_t head); +/* header + layer + bitrate + freq + lsf/mpeg25 */ +#define SAME_HEADER_MASK \ + (0xffe00000 | (3 << 17) | (0xf << 12) | (3 << 10) | (3 << 19)) -extern const uint16_t mpa_bitrate_tab[2][3][15]; -extern const uint16_t mpa_freq_tab[3]; -extern const unsigned char *alloc_tables[5]; -extern const double enwindow[512]; -extern const int sblimit_table[5]; -extern const int quant_steps[17]; -extern const int quant_bits[17]; -extern const int32_t mpa_enwindow[257]; +#define MP3_MASK 0xFFFE0CCF + +/* define USE_HIGHPRECISION to have a bit exact (but slower) mpeg + audio decoder */ + +#ifdef USE_HIGHPRECISION +#define FRAC_BITS 23 /* fractional bits for sb_samples and dct */ +#define WFRAC_BITS 16 /* fractional bits for window */ +#else +#define FRAC_BITS 15 /* fractional bits for sb_samples and dct */ +#define WFRAC_BITS 14 /* fractional bits for window */ +#endif + +#define FRAC_ONE (1 << FRAC_BITS) + +#define FIX(a) ((int)((a) * FRAC_ONE)) + +#if defined(USE_HIGHPRECISION) && defined(CONFIG_AUDIO_NONSHORT) +typedef int32_t OUT_INT; +#define OUT_MAX INT32_MAX +#define OUT_MIN INT32_MIN +#define OUT_SHIFT (WFRAC_BITS + FRAC_BITS - 31) +#else +typedef int16_t OUT_INT; +#define OUT_MAX INT16_MAX +#define OUT_MIN INT16_MIN +#define OUT_SHIFT (WFRAC_BITS + FRAC_BITS - 15) +#endif + +#if FRAC_BITS <= 15 +typedef int16_t MPA_INT; +#else +typedef int32_t MPA_INT; +#endif + +#define BACKSTEP_SIZE 512 +#define EXTRABYTES 24 + +struct GranuleDef; + +typedef struct MPADecodeContext { + DECLARE_ALIGNED_8(uint8_t, last_buf[2*BACKSTEP_SIZE + EXTRABYTES]); + int last_buf_size; + int frame_size; + /* next header (used in free format parsing) */ + uint32_t free_format_next_header; + int error_protection; + int layer; + int sample_rate; + int sample_rate_index; /* between 0 and 8 */ + int bit_rate; + GetBitContext gb; + GetBitContext in_gb; + int nb_channels; + int mode; + int mode_ext; + int lsf; + 
DECLARE_ALIGNED_16(MPA_INT, synth_buf[MPA_MAX_CHANNELS][512 * 2]); + int synth_buf_offset[MPA_MAX_CHANNELS]; + DECLARE_ALIGNED_16(int32_t, sb_samples[MPA_MAX_CHANNELS][36][SBLIMIT]); + int32_t mdct_buf[MPA_MAX_CHANNELS][SBLIMIT * 18]; /* previous samples, for layer 3 MDCT */ +#ifdef DEBUG + int frame_count; +#endif + void (*compute_antialias)(struct MPADecodeContext *s, struct GranuleDef *g); + int adu_mode; ///< 0 for standard mp3, 1 for adu formatted mp3 + int dither_state; + int error_resilience; + AVCodecContext* avctx; +} MPADecodeContext; + +/* layer 3 huffman tables */ +typedef struct HuffTable { + int xsize; + const uint8_t *bits; + const uint16_t *codes; +} HuffTable; + +int ff_mpa_l2_select_table(int bitrate, int nb_channels, int freq, int lsf); +int ff_mpa_decode_header(AVCodecContext *avctx, uint32_t head, int *sample_rate); +void ff_mpa_synth_init(MPA_INT *window); +void ff_mpa_synth_filter(MPA_INT *synth_buf_ptr, int *synth_buf_offset, + MPA_INT *window, int *dither_state, + OUT_INT *samples, int incr, + int32_t sb_samples[SBLIMIT]); + +/* fast header check for resync */ +static inline int ff_mpa_check_header(uint32_t header){ + /* header */ + if ((header & 0xffe00000) != 0xffe00000) + return -1; + /* layer check */ + if ((header & (3<<17)) == 0) + return -1; + /* bit rate */ + if ((header & (0xf<<12)) == 0xf<<12) + return -1; + /* frequency */ + if ((header & (3<<10)) == 3<<10) + return -1; + return 0; +} + +#endif /* FFMPEG_MPEGAUDIO_H */ diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudio_parser.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudio_parser.c new file mode 100644 index 0000000000..e7cb7439e5 --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudio_parser.c @@ -0,0 +1,252 @@ +/* + * MPEG Audio parser + * Copyright (c) 2003 Fabrice Bellard. + * Copyright (c) 2003 Michael Niedermayer. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "parser.h" +#include "mpegaudio.h" +#include "mpegaudiodecheader.h" + + +typedef struct MpegAudioParseContext { + uint8_t inbuf[MPA_MAX_CODED_FRAME_SIZE]; /* input buffer */ + uint8_t *inbuf_ptr; + int frame_size; + int free_format_frame_size; + int free_format_next_header; + uint32_t header; + int header_count; +} MpegAudioParseContext; + +#define MPA_HEADER_SIZE 4 + +/* header + layer + bitrate + freq + lsf/mpeg25 */ +#undef SAME_HEADER_MASK /* mpegaudio.h defines different version */ +#define SAME_HEADER_MASK \ + (0xffe00000 | (3 << 17) | (3 << 10) | (3 << 19)) + +/* useful helper to get mpeg audio stream infos. 
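ff_mpa_check_header(), added above in mpegaudio.h, is the cheap sanity filter the decoder and parser rely on while resynchronizing: it only tests the four fields that can make a header outright invalid. Here is a standalone sketch of the same checks; the helper name and the sample header values are illustrative, not library API.

/* Sketch of the fast MPEG audio header sanity check (cf. ff_mpa_check_header
 * above): reject anything whose syncword, layer, bitrate index or
 * sampling-frequency index is impossible. */
#include <stdint.h>
#include <stdio.h>

static int check_mpa_header(uint32_t h)
{
    if ((h & 0xffe00000) != 0xffe00000)    return -1;  /* 11-bit syncword */
    if ((h & (3 << 17)) == 0)              return -1;  /* layer "00" is reserved */
    if ((h & (0xf << 12)) == (0xfu << 12)) return -1;  /* bitrate index 15 is forbidden */
    if ((h & (3 << 10)) == (3 << 10))      return -1;  /* frequency index 3 is reserved */
    return 0;
}

int main(void)
{
    /* 0xFFFB9064: MPEG-1 Layer III, 128 kb/s, 44.1 kHz -- passes the check */
    printf("%d\n", check_mpa_header(0xFFFB9064u));   /* 0 */
    /* 0xFFFBF064: same header with bitrate index 15 -- rejected */
    printf("%d\n", check_mpa_header(0xFFFBF064u));   /* -1 */
    return 0;
}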
Return -1 if error in + header, otherwise the coded frame size in bytes */ +int ff_mpa_decode_header(AVCodecContext *avctx, uint32_t head, int *sample_rate) +{ + MPADecodeContext s1, *s = &s1; + s1.avctx = avctx; + + if (ff_mpa_check_header(head) != 0) + return -1; + + if (ff_mpegaudio_decode_header(s, head) != 0) { + return -1; + } + + switch(s->layer) { + case 1: + avctx->frame_size = 384; + break; + case 2: + avctx->frame_size = 1152; + break; + default: + case 3: + if (s->lsf) + avctx->frame_size = 576; + else + avctx->frame_size = 1152; + break; + } + + *sample_rate = s->sample_rate; + avctx->channels = s->nb_channels; + avctx->bit_rate = s->bit_rate; + avctx->sub_id = s->layer; + return s->frame_size; +} + +static int mpegaudio_parse_init(AVCodecParserContext *s1) +{ + MpegAudioParseContext *s = s1->priv_data; + s->inbuf_ptr = s->inbuf; + return 0; +} + +static int mpegaudio_parse(AVCodecParserContext *s1, + AVCodecContext *avctx, + const uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size) +{ + MpegAudioParseContext *s = s1->priv_data; + int len, ret, sr; + uint32_t header; + const uint8_t *buf_ptr; + + *poutbuf = NULL; + *poutbuf_size = 0; + buf_ptr = buf; + while (buf_size > 0) { + len = s->inbuf_ptr - s->inbuf; + if (s->frame_size == 0) { + /* special case for next header for first frame in free + format case (XXX: find a simpler method) */ + if (s->free_format_next_header != 0) { + AV_WB32(s->inbuf, s->free_format_next_header); + s->inbuf_ptr = s->inbuf + 4; + s->free_format_next_header = 0; + goto got_header; + } + /* no header seen : find one. We need at least MPA_HEADER_SIZE + bytes to parse it */ + len = FFMIN(MPA_HEADER_SIZE - len, buf_size); + if (len > 0) { + memcpy(s->inbuf_ptr, buf_ptr, len); + buf_ptr += len; + buf_size -= len; + s->inbuf_ptr += len; + } + if ((s->inbuf_ptr - s->inbuf) >= MPA_HEADER_SIZE) { + got_header: + header = AV_RB32(s->inbuf); + + ret = ff_mpa_decode_header(avctx, header, &sr); + if (ret < 0) { + s->header_count= -2; + /* no sync found : move by one byte (inefficient, but simple!) 
*/ + memmove(s->inbuf, s->inbuf + 1, s->inbuf_ptr - s->inbuf - 1); + s->inbuf_ptr--; + dprintf(avctx, "skip %x\n", header); + /* reset free format frame size to give a chance + to get a new bitrate */ + s->free_format_frame_size = 0; + } else { + if((header&SAME_HEADER_MASK) != (s->header&SAME_HEADER_MASK) && s->header) + s->header_count= -3; + s->header= header; + s->header_count++; + s->frame_size = ret; + +#if 0 + /* free format: prepare to compute frame size */ + if (ff_mpegaudio_decode_header(s, header) == 1) { + s->frame_size = -1; + } +#endif + if(s->header_count > 1) + avctx->sample_rate= sr; + } + } + } else +#if 0 + if (s->frame_size == -1) { + /* free format : find next sync to compute frame size */ + len = MPA_MAX_CODED_FRAME_SIZE - len; + if (len > buf_size) + len = buf_size; + if (len == 0) { + /* frame too long: resync */ + s->frame_size = 0; + memmove(s->inbuf, s->inbuf + 1, s->inbuf_ptr - s->inbuf - 1); + s->inbuf_ptr--; + } else { + uint8_t *p, *pend; + uint32_t header1; + int padding; + + memcpy(s->inbuf_ptr, buf_ptr, len); + /* check for header */ + p = s->inbuf_ptr - 3; + pend = s->inbuf_ptr + len - 4; + while (p <= pend) { + header = AV_RB32(p); + header1 = AV_RB32(s->inbuf); + /* check with high probability that we have a + valid header */ + if ((header & SAME_HEADER_MASK) == + (header1 & SAME_HEADER_MASK)) { + /* header found: update pointers */ + len = (p + 4) - s->inbuf_ptr; + buf_ptr += len; + buf_size -= len; + s->inbuf_ptr = p; + /* compute frame size */ + s->free_format_next_header = header; + s->free_format_frame_size = s->inbuf_ptr - s->inbuf; + padding = (header1 >> 9) & 1; + if (s->layer == 1) + s->free_format_frame_size -= padding * 4; + else + s->free_format_frame_size -= padding; + dprintf(avctx, "free frame size=%d padding=%d\n", + s->free_format_frame_size, padding); + ff_mpegaudio_decode_header(s, header1); + goto next_data; + } + p++; + } + /* not found: simply increase pointers */ + buf_ptr += len; + s->inbuf_ptr += len; + buf_size -= len; + } + } else +#endif + if (len < s->frame_size) { + if (s->frame_size > MPA_MAX_CODED_FRAME_SIZE) + s->frame_size = MPA_MAX_CODED_FRAME_SIZE; + len = FFMIN(s->frame_size - len, buf_size); + memcpy(s->inbuf_ptr, buf_ptr, len); + buf_ptr += len; + s->inbuf_ptr += len; + buf_size -= len; + } + + if(s->frame_size > 0 && buf_ptr - buf == s->inbuf_ptr - s->inbuf + && buf_size + buf_ptr - buf >= s->frame_size){ + if(s->header_count > 0){ + *poutbuf = buf; + *poutbuf_size = s->frame_size; + } + buf_ptr = buf + s->frame_size; + s->inbuf_ptr = s->inbuf; + s->frame_size = 0; + break; + } + + // next_data: + if (s->frame_size > 0 && + (s->inbuf_ptr - s->inbuf) >= s->frame_size) { + if(s->header_count > 0){ + *poutbuf = s->inbuf; + *poutbuf_size = s->inbuf_ptr - s->inbuf; + } + s->inbuf_ptr = s->inbuf; + s->frame_size = 0; + break; + } + } + return buf_ptr - buf; +} + + +AVCodecParser mpegaudio_parser = { + { CODEC_ID_MP2, CODEC_ID_MP3 }, + sizeof(MpegAudioParseContext), + mpegaudio_parse_init, + mpegaudio_parse, + NULL, +}; diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodata.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodata.c new file mode 100644 index 0000000000..ee8fd9f0aa --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodata.c @@ -0,0 +1,225 @@ +/* + * MPEG Audio common tables + * copyright (c) 2002 Fabrice Bellard + * + * This file is part of FFmpeg. 
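The parser added above only emits frames once incoming headers agree on the fields covered by its SAME_HEADER_MASK (tracked through header_count), and it resynchronizes by sliding forward one byte at a time whenever a candidate header fails the sanity check. A compact sketch of that resynchronization idea follows; the function name, the fixed frame_size parameter and the three-header confidence threshold are assumptions made for the sketch, not the parser's actual interface.

/* Sketch: byte-wise resync that demands three agreeing headers before
 * locking on, mirroring the header_count / SAME_HEADER_MASK logic above.
 * check_mpa_header() is the four-field sanity test sketched earlier. */
#include <stdint.h>

/* same fields as the parser's SAME_HEADER_MASK: sync, layer, freq, lsf/mpeg25 */
#define HDR_SAME_MASK (0xffe00000u | (3 << 17) | (3 << 10) | (3 << 19))

extern int check_mpa_header(uint32_t h);

static uint32_t rd32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

/* Returns the offset of the first header confirmed by three agreeing
 * headers in a row, or -1.  frame_size is assumed constant here; the
 * real parser recomputes it from each header it accepts. */
int mpa_resync(const uint8_t *buf, int size, int frame_size)
{
    int start;

    for (start = 0; start + 4 <= size; start++) {
        uint32_t first = rd32(buf + start);
        int i = start, count = 0;

        if (check_mpa_header(first) < 0)
            continue;                     /* not even a plausible header */

        while (i + 4 <= size && count < 3) {
            uint32_t h = rd32(buf + i);
            if (check_mpa_header(h) < 0 ||
                (h & HDR_SAME_MASK) != (first & HDR_SAME_MASK))
                break;                    /* disagreement: restart the scan */
            count++;
            i += frame_size;              /* hop to where the next header sits */
        }
        if (count == 3)
            return start;                 /* locked on */
    }
    return -1;
}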
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file mpegaudiodata.c + * mpeg audio layer common tables. + */ + +#include "mpegaudiodata.h" + + +const uint16_t ff_mpa_bitrate_tab[2][3][15] = { + { {0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448 }, + {0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384 }, + {0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320 } }, + { {0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256}, + {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160}, + {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160} + } +}; + +const uint16_t ff_mpa_freq_tab[3] = { 44100, 48000, 32000 }; + +/*******************************************************/ +/* half mpeg encoding window (full precision) */ +const int32_t ff_mpa_enwindow[257] = { + 0, -1, -1, -1, -1, -1, -1, -2, + -2, -2, -2, -3, -3, -4, -4, -5, + -5, -6, -7, -7, -8, -9, -10, -11, + -13, -14, -16, -17, -19, -21, -24, -26, + -29, -31, -35, -38, -41, -45, -49, -53, + -58, -63, -68, -73, -79, -85, -91, -97, + -104, -111, -117, -125, -132, -139, -147, -154, + -161, -169, -176, -183, -190, -196, -202, -208, + 213, 218, 222, 225, 227, 228, 228, 227, + 224, 221, 215, 208, 200, 189, 177, 163, + 146, 127, 106, 83, 57, 29, -2, -36, + -72, -111, -153, -197, -244, -294, -347, -401, + -459, -519, -581, -645, -711, -779, -848, -919, + -991, -1064, -1137, -1210, -1283, -1356, -1428, -1498, + -1567, -1634, -1698, -1759, -1817, -1870, -1919, -1962, + -2001, -2032, -2057, -2075, -2085, -2087, -2080, -2063, + 2037, 2000, 1952, 1893, 1822, 1739, 1644, 1535, + 1414, 1280, 1131, 970, 794, 605, 402, 185, + -45, -288, -545, -814, -1095, -1388, -1692, -2006, + -2330, -2663, -3004, -3351, -3705, -4063, -4425, -4788, + -5153, -5517, -5879, -6237, -6589, -6935, -7271, -7597, + -7910, -8209, -8491, -8755, -8998, -9219, -9416, -9585, + -9727, -9838, -9916, -9959, -9966, -9935, -9863, -9750, + -9592, -9389, -9139, -8840, -8492, -8092, -7640, -7134, + 6574, 5959, 5288, 4561, 3776, 2935, 2037, 1082, + 70, -998, -2122, -3300, -4533, -5818, -7154, -8540, + -9975,-11455,-12980,-14548,-16155,-17799,-19478,-21189, +-22929,-24694,-26482,-28289,-30112,-31947,-33791,-35640, +-37489,-39336,-41176,-43006,-44821,-46617,-48390,-50137, +-51853,-53534,-55178,-56778,-58333,-59838,-61289,-62684, +-64019,-65290,-66494,-67629,-68692,-69679,-70590,-71420, +-72169,-72835,-73415,-73908,-74313,-74630,-74856,-74992, + 75038, +}; + +/*******************************************************/ +/* layer 2 tables */ + +const int ff_mpa_sblimit_table[5] = { 27 , 30 , 8, 12 , 30 }; + +const int ff_mpa_quant_steps[17] = { + 3, 5, 7, 9, 15, + 31, 63, 127, 255, 511, + 1023, 2047, 4095, 8191, 16383, + 32767, 65535 +}; + +/* we use a negative value if grouped */ +const int ff_mpa_quant_bits[17] = { + -5, -7, 3, 
-10, 4, + 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, + 15, 16 +}; + +/* encoding tables which give the quantization index. Note how it is + possible to store them efficiently ! */ +static const unsigned char alloc_table_0[] = { + 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 2, 0, 1, 16, + 2, 0, 1, 16, + 2, 0, 1, 16, + 2, 0, 1, 16, +}; + +static const unsigned char alloc_table_1[] = { + 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 3, 0, 1, 2, 3, 4, 5, 16, + 2, 0, 1, 16, + 2, 0, 1, 16, + 2, 0, 1, 16, + 2, 0, 1, 16, + 2, 0, 1, 16, + 2, 0, 1, 16, + 2, 0, 1, 16, +}; + +static const unsigned char alloc_table_2[] = { + 4, 0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 4, 0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, +}; + +static const unsigned char alloc_table_3[] = { + 4, 0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 4, 0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, +}; + +static const unsigned char alloc_table_4[] = { + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 3, 0, 1, 3, 4, 5, 6, 7, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 
3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, + 2, 0, 1, 3, +}; + +const unsigned char * const ff_mpa_alloc_tables[5] = +{ alloc_table_0, alloc_table_1, alloc_table_2, alloc_table_3, alloc_table_4, }; diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodata.h b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodata.h new file mode 100644 index 0000000000..3682158f92 --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodata.h @@ -0,0 +1,43 @@ +/* + * MPEG Audio common tables + * copyright (c) 2002 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file mpegaudiodata.h + * mpeg audio layer common tables. + */ + +#ifndef FFMPEG_MPEGAUDIODATA_H +#define FFMPEG_MPEGAUDIODATA_H + +#include "common.h" + +#define MODE_EXT_MS_STEREO 2 +#define MODE_EXT_I_STEREO 1 + +extern const uint16_t ff_mpa_bitrate_tab[2][3][15]; +extern const uint16_t ff_mpa_freq_tab[3]; +extern const int32_t ff_mpa_enwindow[257]; +extern const int ff_mpa_sblimit_table[5]; +extern const int ff_mpa_quant_steps[17]; +extern const int ff_mpa_quant_bits[17]; +extern const unsigned char * const ff_mpa_alloc_tables[5]; + +#endif /* FFMPEG_MPEGAUDIODATA_H */ diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodec.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodec.c index 09e9b8cdbc..61e0c6fbd6 100644 --- a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodec.c +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodec.c @@ -2,29 +2,31 @@ * MPEG Audio decoder * Copyright (c) 2001, 2002 Fabrice Bellard. * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
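The alloc_table_* arrays above use the compact layout the old encoder comment ("Note how it is possible to store them efficiently !") referred to: each subband entry is one byte giving the width of the allocation code, followed by 2^width - 1 quantizer-class indices (allocation code 0 means the band is not coded), so every subband occupies exactly 1 << width bytes. A short sketch of walking a table in that format; the function and variable names are illustrative.

/* Sketch: walk a Layer II allocation table in the packed format used by
 * ff_mpa_alloc_tables above and total the fixed per-frame cost of the
 * header plus the bit-allocation field. */
static int bit_alloc_field_bits(const unsigned char *alloc_table,
                                int sblimit, int nb_channels)
{
    const unsigned char *alloc = alloc_table;
    int sb, bits = 32;                  /* 32 header bits per frame */

    for (sb = 0; sb < sblimit; sb++) {
        int nbits = alloc[0];           /* width of this band's alloc code */
        bits += nbits * nb_channels;    /* one alloc code per channel */
        alloc += 1 << nbits;            /* count byte + (2^nbits - 1) indices */
    }
    return bits;
}

/* e.g. bit_alloc_field_bits(ff_mpa_alloc_tables[0], ff_mpa_sblimit_table[0], 2)
 * gives the overhead before scale factors and samples for table 0 in stereo. */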
* * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file mpegaudiodec.c * MPEG Audio decoder. - */ + */ //#define DEBUG #include "avcodec.h" -#include "mpegaudio.h" +#include "bitstream.h" #include "dsputil.h" /* @@ -36,68 +38,23 @@ /* define USE_HIGHPRECISION to have a bit exact (but slower) mpeg audio decoder */ #ifdef CONFIG_MPEGAUDIO_HP -#define USE_HIGHPRECISION +# define USE_HIGHPRECISION #endif -#ifdef USE_HIGHPRECISION -#define FRAC_BITS 23 /* fractional bits for sb_samples and dct */ -#define WFRAC_BITS 16 /* fractional bits for window */ -#else -#define FRAC_BITS 15 /* fractional bits for sb_samples and dct */ -#define WFRAC_BITS 14 /* fractional bits for window */ -#endif +#include "mpegaudio.h" +#include "mpegaudiodecheader.h" -#define FRAC_ONE (1 << FRAC_BITS) +#include "mathops.h" -#define MULL(a,b) (((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS) -#define MUL64(a,b) ((int64_t)(a) * (int64_t)(b)) -#define FIX(a) ((int)((a) * FRAC_ONE)) /* WARNING: only correct for posititive numbers */ #define FIXR(a) ((int)((a) * FRAC_ONE + 0.5)) #define FRAC_RND(a) (((a) + (FRAC_ONE/2)) >> FRAC_BITS) -#if FRAC_BITS <= 15 -typedef int16_t MPA_INT; -#else -typedef int32_t MPA_INT; -#endif +#define FIXHR(a) ((int)((a) * (1LL<<32) + 0.5)) /****************/ #define HEADER_SIZE 4 -#define BACKSTEP_SIZE 512 - -struct GranuleDef; - -typedef struct MPADecodeContext { - uint8_t inbuf1[2][MPA_MAX_CODED_FRAME_SIZE + BACKSTEP_SIZE]; /* input buffer */ - int inbuf_index; - uint8_t *inbuf_ptr, *inbuf; - int frame_size; - int free_format_frame_size; /* frame size in case of free format - (zero if currently unknown) */ - /* next header (used in free format parsing) */ - uint32_t free_format_next_header; - int error_protection; - int layer; - int sample_rate; - int sample_rate_index; /* between 0 and 8 */ - int bit_rate; - int old_frame_size; - GetBitContext gb; - int nb_channels; - int mode; - int mode_ext; - int lsf; - MPA_INT synth_buf[MPA_MAX_CHANNELS][512 * 2] __attribute__((aligned(16))); - int synth_buf_offset[MPA_MAX_CHANNELS]; - int32_t sb_samples[MPA_MAX_CHANNELS][36][SBLIMIT] __attribute__((aligned(16))); - int32_t mdct_buf[MPA_MAX_CHANNELS][SBLIMIT * 18]; /* previous samples, for layer 3 MDCT */ -#ifdef DEBUG - int frame_count; -#endif - void (*compute_antialias)(struct MPADecodeContext *s, struct GranuleDef *g); -} MPADecodeContext; /* layer 3 "granule" */ typedef struct GranuleDef { @@ -119,35 +76,35 @@ typedef struct GranuleDef { int32_t sb_hybrid[SBLIMIT * 18]; /* 576 samples */ } GranuleDef; -#define MODE_EXT_MS_STEREO 2 -#define MODE_EXT_I_STEREO 1 - -/* layer 3 huffman tables */ -typedef struct HuffTable { - int xsize; - const uint8_t *bits; - const uint16_t *codes; -} HuffTable; - +#include "mpegaudiodata.h" #include "mpegaudiodectab.h" static void compute_antialias_integer(MPADecodeContext *s, GranuleDef *g); static void compute_antialias_float(MPADecodeContext *s, GranuleDef *g); /* vlc structure for decoding layer 3 huffman tables */ -static VLC huff_vlc[16]; -static uint8_t *huff_code_table[16]; +static VLC huff_vlc[16]; +static VLC_TYPE huff_vlc_tables[ + 0+128+128+128+130+128+154+166+ + 142+204+190+170+542+460+662+414 + ][2]; +static const int huff_vlc_tables_sizes[16] 
= { + 0, 128, 128, 128, 130, 128, 154, 166, + 142, 204, 190, 170, 542, 460, 662, 414 +}; static VLC huff_quad_vlc[2]; +static VLC_TYPE huff_quad_vlc_tables[128+16][2]; +static const int huff_quad_vlc_tables_sizes[2] = { + 128, 16 +}; /* computed from band_size_long */ static uint16_t band_index_long[9][23]; /* XXX: free when all decoders are closed */ -#define TABLE_4_3_SIZE (8191 + 16) -static int8_t *table_4_3_exp; -#if FRAC_BITS <= 15 -static uint16_t *table_4_3_value; -#else -static uint32_t *table_4_3_value; -#endif +#define TABLE_4_3_SIZE (8191 + 16)*4 +static int8_t table_4_3_exp[TABLE_4_3_SIZE]; +static uint32_t table_4_3_value[TABLE_4_3_SIZE]; +static uint32_t exp_table[512]; +static uint32_t expval_table[512][16]; /* intensity stereo coef table */ static int32_t is_table[2][16]; static int32_t is_table_lsf[2][2][16]; @@ -164,22 +121,76 @@ static int32_t scale_factor_mult[15][3]; #define SCALE_GEN(v) \ { FIXR(1.0 * (v)), FIXR(0.7937005259 * (v)), FIXR(0.6299605249 * (v)) } -static int32_t scale_factor_mult2[3][3] = { +static const int32_t scale_factor_mult2[3][3] = { SCALE_GEN(4.0 / 3.0), /* 3 steps */ SCALE_GEN(4.0 / 5.0), /* 5 steps */ SCALE_GEN(4.0 / 9.0), /* 9 steps */ }; -/* 2^(n/4) */ -static uint32_t scale_factor_mult3[4] = { - FIXR(1.0), - FIXR(1.18920711500272106671), - FIXR(1.41421356237309504880), - FIXR(1.68179283050742908605), -}; +static DECLARE_ALIGNED_16(MPA_INT, window[512]); + +/** + * Convert region offsets to region sizes and truncate + * size to big_values. + */ +void ff_region_offset2size(GranuleDef *g){ + int i, k, j=0; + g->region_size[2] = (576 / 2); + for(i=0;i<3;i++) { + k = FFMIN(g->region_size[i], g->big_values); + g->region_size[i] = k - j; + j = k; + } +} + +void ff_init_short_region(MPADecodeContext *s, GranuleDef *g){ + if (g->block_type == 2) + g->region_size[0] = (36 / 2); + else { + if (s->sample_rate_index <= 2) + g->region_size[0] = (36 / 2); + else if (s->sample_rate_index != 8) + g->region_size[0] = (54 / 2); + else + g->region_size[0] = (108 / 2); + } + g->region_size[1] = (576 / 2); +} + +void ff_init_long_region(MPADecodeContext *s, GranuleDef *g, int ra1, int ra2){ + int l; + g->region_size[0] = + band_index_long[s->sample_rate_index][ra1 + 1] >> 1; + /* should not overflow */ + l = FFMIN(ra1 + ra2 + 2, 22); + g->region_size[1] = + band_index_long[s->sample_rate_index][l] >> 1; +} + +void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g){ + if (g->block_type == 2) { + if (g->switch_point) { + /* if switched mode, we handle the 36 first samples as + long blocks. For 8000Hz, we handle the 48 first + exponents as long blocks (XXX: check this!) */ + if (s->sample_rate_index <= 2) + g->long_end = 8; + else if (s->sample_rate_index != 8) + g->long_end = 6; + else + g->long_end = 4; /* 8000 Hz */ + + g->short_start = 2 + (s->sample_rate_index != 8); + } else { + g->long_end = 0; + g->short_start = 0; + } + } else { + g->short_start = 13; + g->long_end = 22; + } +} -static MPA_INT window[512] __attribute__((aligned(16))); - /* layer 1 unscaling */ /* n = number of bits of the mantissa minus 1 */ static inline int l1_unscale(int n, int mant, int scale_factor) @@ -214,30 +225,18 @@ static inline int l2_unscale_group(int steps, int mant, int scale_factor) /* compute value^(4/3) * 2^(exponent/4). 
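ff_region_offset2size(), added above, converts the cumulative region boundaries signalled in a Layer III granule into per-region pair counts, clamping everything to big_values so a malformed bitstream cannot drive the Huffman stage past the coded coefficients. A standalone sketch of that conversion; the name and the example values are illustrative.

/* Sketch of the offsets-to-sizes conversion done by ff_region_offset2size()
 * above: region_size[] holds cumulative end offsets (in coefficient pairs)
 * on entry and per-region lengths, clipped to big_values, on return. */
static void regions_to_sizes(int region_size[3], int big_values)
{
    int i, end, prev = 0;

    region_size[2] = 576 / 2;           /* the last region always ends at 288 pairs */
    for (i = 0; i < 3; i++) {
        end = region_size[i] < big_values ? region_size[i] : big_values;
        region_size[i] = end - prev;    /* length of this region */
        prev = end;
    }
}

/* Example: cumulative ends {36, 90, 288} with big_values = 120
 * become lengths {36, 54, 30}. */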
It normalized to FRAC_BITS */ static inline int l3_unscale(int value, int exponent) { -#if FRAC_BITS <= 15 unsigned int m; -#else - uint64_t m; -#endif int e; - e = table_4_3_exp[value]; - e += (exponent >> 2); - e = FRAC_BITS - e; -#if FRAC_BITS <= 15 + e = table_4_3_exp [4*value + (exponent&3)]; + m = table_4_3_value[4*value + (exponent&3)]; + e -= (exponent >> 2); + assert(e>=1); if (e > 31) - e = 31; -#endif - m = table_4_3_value[value]; -#if FRAC_BITS <= 15 - m = (m * scale_factor_mult3[exponent & 3]); + return 0; m = (m + (1 << (e-1))) >> e; + return m; -#else - m = MUL64(m, scale_factor_mult3[exponent & 3]); - m = (m + (uint64_t_C(1) << (e-1))) >> e; - return m; -#endif } /* all integer n^(4/3) computation code */ @@ -250,11 +249,13 @@ static inline int l3_unscale(int value, int exponent) static int dev_4_3_coefs[DEV_ORDER]; +#if 0 /* unused */ static int pow_mult3[3] = { POW_FIX(1.0), POW_FIX(1.25992104989487316476), POW_FIX(1.58740105196819947474), }; +#endif static void int_pow_init(void) { @@ -267,12 +268,13 @@ static void int_pow_init(void) } } +#if 0 /* unused, remove? */ /* return the mantissa and the binary exponent */ static int int_pow(int i, int *exp_ptr) { int e, er, eq, j; int a, a1; - + /* renormalize */ a = i; e = POW_FRAC_BITS; @@ -311,6 +313,7 @@ static int int_pow(int i, int *exp_ptr) *exp_ptr = eq; return a; } +#endif static int decode_init(AVCodecContext * avctx) { @@ -318,12 +321,23 @@ static int decode_init(AVCodecContext * avctx) static int init=0; int i, j, k; - if(avctx->antialias_algo == FF_AA_INT) + s->avctx = avctx; + +#if defined(USE_HIGHPRECISION) && defined(CONFIG_AUDIO_NONSHORT) + avctx->sample_fmt= SAMPLE_FMT_S32; +#else + avctx->sample_fmt= SAMPLE_FMT_S16; +#endif + s->error_resilience= avctx->error_resilience; + + if(avctx->antialias_algo != FF_AA_FLOAT) s->compute_antialias= compute_antialias_integer; else s->compute_antialias= compute_antialias_float; if (!init && !avctx->parse_only) { + int offset; + /* scale factors table for layer 1/2 */ for(i=0;i<64;i++) { int shift, mod; @@ -337,58 +351,62 @@ static int decode_init(AVCodecContext * avctx) for(i=0;i<15;i++) { int n, norm; n = i + 2; - norm = ((int64_t_C(1) << n) * FRAC_ONE) / ((1 << n) - 1); + norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1); scale_factor_mult[i][0] = MULL(FIXR(1.0 * 2.0), norm); scale_factor_mult[i][1] = MULL(FIXR(0.7937005259 * 2.0), norm); scale_factor_mult[i][2] = MULL(FIXR(0.6299605249 * 2.0), norm); - dprintf("%d: norm=%x s=%x %x %x\n", - i, norm, + dprintf(avctx, "%d: norm=%x s=%x %x %x\n", + i, norm, scale_factor_mult[i][0], scale_factor_mult[i][1], scale_factor_mult[i][2]); } - - /* window */ - /* max = 18760, max sum over all 16 coefs : 44736 */ - for(i=0;i<257;i++) { - int v; - v = mpa_enwindow[i]; -#if WFRAC_BITS < 16 - v = (v + (1 << (16 - WFRAC_BITS - 1))) >> (16 - WFRAC_BITS); -#endif - window[i] = v; - if ((i & 63) != 0) - v = -v; - if (i != 0) - window[512 - i] = v; - } - + + ff_mpa_synth_init(window); + /* huffman decode tables */ - huff_code_table[0] = NULL; + offset = 0; for(i=1;i<16;i++) { const HuffTable *h = &mpa_huff_tables[i]; - int xsize, x, y; + int xsize, x, y; unsigned int n; - uint8_t *code_table; + uint8_t tmp_bits [512]; + uint16_t tmp_codes[512]; + + memset(tmp_bits , 0, sizeof(tmp_bits )); + memset(tmp_codes, 0, sizeof(tmp_codes)); xsize = h->xsize; n = xsize * xsize; - /* XXX: fail test */ - init_vlc(&huff_vlc[i], 8, n, - h->bits, 1, 1, h->codes, 2, 2); - - code_table = av_mallocz(n); + j = 0; for(x=0;xbits [j ]; + tmp_codes[(x << 5) | 
y | ((x&&y)<<4)]= h->codes[j++]; + } } - huff_code_table[i] = code_table; + + /* XXX: fail test */ + huff_vlc[i].table = huff_vlc_tables+offset; + huff_vlc[i].table_allocated = huff_vlc_tables_sizes[i]; + init_vlc(&huff_vlc[i], 7, 512, + tmp_bits, 1, 1, tmp_codes, 2, 2, + INIT_VLC_USE_NEW_STATIC); + offset += huff_vlc_tables_sizes[i]; } + assert(offset == sizeof(huff_vlc_tables)/(sizeof(VLC_TYPE)*2)); + + offset = 0; for(i=0;i<2;i++) { - init_vlc(&huff_quad_vlc[i], i == 0 ? 7 : 4, 16, - mpa_quad_bits[i], 1, 1, mpa_quad_codes[i], 1, 1); + huff_quad_vlc[i].table = huff_quad_vlc_tables+offset; + huff_quad_vlc[i].table_allocated = huff_quad_vlc_tables_sizes[i]; + init_vlc(&huff_quad_vlc[i], i == 0 ? 7 : 4, 16, + mpa_quad_bits[i], 1, 1, mpa_quad_codes[i], 1, 1, + INIT_VLC_USE_NEW_STATIC); + offset += huff_quad_vlc_tables_sizes[i]; } + assert(offset == sizeof(huff_quad_vlc_tables)/(sizeof(VLC_TYPE)*2)); for(i=0;i<9;i++) { k = 0; @@ -399,44 +417,30 @@ static int decode_init(AVCodecContext * avctx) band_index_long[i][22] = k; } - /* compute n ^ (4/3) and store it in mantissa/exp format */ - if (!av_mallocz_static(&table_4_3_exp, - TABLE_4_3_SIZE * sizeof(table_4_3_exp[0]))) - return -1; - if (!av_mallocz_static(&table_4_3_value, - TABLE_4_3_SIZE * sizeof(table_4_3_value[0]))) - return -1; - + /* compute n ^ (4/3) and store it in mantissa/exp format */ + int_pow_init(); for(i=1;i> 1; - e1++; - } -#endif - e1--; - if (m != m1 || e != e1) { - printf("%4d: m=%x m1=%x e=%d e1=%d\n", - i, m, m1, e, e1); - } - } -#endif + f = pow((double)(i/4), 4.0 / 3.0) * pow(2, (i&3)*0.25); + fm = frexp(f, &e); + m = (uint32_t)(fm*(1LL<<31) + 0.5); + e+= FRAC_BITS - 31 + 5 - 100; + /* normalized to FRAC_BITS */ table_4_3_value[i] = m; - table_4_3_exp[i] = e; +// av_log(NULL, AV_LOG_DEBUG, "%d %d %f\n", i, m, pow((double)i, 4.0 / 3.0)); + table_4_3_exp[i] = -e; } - + for(i=0; i<512*16; i++){ + int exponent= (i>>4); + double f= pow(i&15, 4.0 / 3.0) * pow(2, (exponent-400)*0.25 + FRAC_BITS + 5); + expval_table[exponent][i&15]= llrint(f); + if((i&15)==1) + exp_table[exponent]= llrint(f); + } + for(i=0;i<7;i++) { float f; int v; @@ -463,7 +467,7 @@ static int decode_init(AVCodecContext * avctx) k = i & 1; is_table_lsf[j][k ^ 1][i] = FIXR(f); is_table_lsf[j][k][i] = FIXR(1.0); - dprintf("is_table_lsf %d %d: %x %x\n", + dprintf(avctx, "is_table_lsf %d %d: %x %x\n", i, j, is_table_lsf[j][0][i], is_table_lsf[j][1][i]); } } @@ -473,38 +477,47 @@ static int decode_init(AVCodecContext * avctx) ci = ci_table[i]; cs = 1.0 / sqrt(1.0 + ci * ci); ca = cs * ci; - csa_table[i][0] = FIX(cs); - csa_table[i][1] = FIX(ca); - csa_table[i][2] = FIX(ca) + FIX(cs); - csa_table[i][3] = FIX(ca) - FIX(cs); + csa_table[i][0] = FIXHR(cs/4); + csa_table[i][1] = FIXHR(ca/4); + csa_table[i][2] = FIXHR(ca/4) + FIXHR(cs/4); + csa_table[i][3] = FIXHR(ca/4) - FIXHR(cs/4); csa_table_float[i][0] = cs; csa_table_float[i][1] = ca; csa_table_float[i][2] = ca + cs; - csa_table_float[i][3] = ca - cs; + csa_table_float[i][3] = ca - cs; // printf("%d %d %d %d\n", FIX(cs), FIX(cs-1), FIX(ca), FIX(cs)-FIX(ca)); +// av_log(NULL, AV_LOG_DEBUG,"%f %f %f %f\n", cs, ca, ca+cs, ca-cs); } /* compute mdct windows */ for(i=0;i<36;i++) { - int v; - v = FIXR(sin(M_PI * (i + 0.5) / 36.0)); - mdct_win[0][i] = v; - mdct_win[1][i] = v; - mdct_win[3][i] = v; - } - for(i=0;i<6;i++) { - mdct_win[1][18 + i] = FIXR(1.0); - mdct_win[1][24 + i] = FIXR(sin(M_PI * ((i + 6) + 0.5) / 12.0)); - mdct_win[1][30 + i] = FIXR(0.0); + for(j=0; j<4; j++){ + double d; - mdct_win[3][i] = FIXR(0.0); - 
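
/*
 * Illustration only: the mantissa/exponent split used above for
 * table_4_3_value/table_4_3_exp, written out as a tiny helper (assumes
 * <math.h> and <stdint.h>).  frexp() returns fm with f == fm * 2^e and
 * 0.5 <= fm < 1; the mantissa is stored as a rounded 31-bit integer, so at
 * run time the decoder only has to shift by the (re-biased) exponent.
 */
static void split_mant_exp(double f, uint32_t *m, int *e)
{
    double fm = frexp(f, e);                 /* f == fm * 2^(*e)         */
    *m = (uint32_t)(fm * (1LL << 31) + 0.5); /* 31-bit mantissa, rounded */
    /* round trip: f ~= ldexp((double)*m / (1LL << 31), *e) */
}
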
mdct_win[3][6 + i] = FIXR(sin(M_PI * (i + 0.5) / 12.0)); - mdct_win[3][12 + i] = FIXR(1.0); + if(j==2 && i%3 != 1) + continue; + + d= sin(M_PI * (i + 0.5) / 36.0); + if(j==1){ + if (i>=30) d= 0; + else if(i>=24) d= sin(M_PI * (i - 18 + 0.5) / 12.0); + else if(i>=18) d= 1; + }else if(j==3){ + if (i< 6) d= 0; + else if(i< 12) d= sin(M_PI * (i - 6 + 0.5) / 12.0); + else if(i< 18) d= 1; + } + //merge last stage of imdct into the window coefficients + d*= 0.5 / cos(M_PI*(2*i + 19)/72); + + if(j==2) + mdct_win[j][i/3] = FIXHR((d / (1<<5))); + else + mdct_win[j][i ] = FIXHR((d / (1<<5))); +// av_log(NULL, AV_LOG_DEBUG, "%2d %d %f\n", i,j,d / (1<<5)); + } } - for(i=0;i<12;i++) - mdct_win[2][i] = FIXR(sin(M_PI * (i + 0.5) / 12.0)); - /* NOTE: we do frequency inversion adter the MDCT by changing the sign of the right window coefs */ for(j=0;j<4;j++) { @@ -516,21 +529,20 @@ static int decode_init(AVCodecContext * avctx) #if defined(DEBUG) for(j=0;j<8;j++) { - printf("win%d=\n", j); + av_log(avctx, AV_LOG_DEBUG, "win%d=\n", j); for(i=0;i<36;i++) - printf("%f, ", (double)mdct_win[j][i] / FRAC_ONE); - printf("\n"); + av_log(avctx, AV_LOG_DEBUG, "%f, ", (double)mdct_win[j][i] / FRAC_ONE); + av_log(avctx, AV_LOG_DEBUG, "\n"); } #endif init = 1; } - s->inbuf_index = 0; - s->inbuf = &s->inbuf1[s->inbuf_index][BACKSTEP_SIZE]; - s->inbuf_ptr = s->inbuf; #ifdef DEBUG s->frame_count = 0; #endif + if (avctx->codec_id == CODEC_ID_MP3ADU) + s->adu_mode = 1; return 0; } @@ -538,62 +550,62 @@ static int decode_init(AVCodecContext * avctx) /* cos(i*pi/64) */ -#define COS0_0 FIXR(0.50060299823519630134) -#define COS0_1 FIXR(0.50547095989754365998) -#define COS0_2 FIXR(0.51544730992262454697) -#define COS0_3 FIXR(0.53104259108978417447) -#define COS0_4 FIXR(0.55310389603444452782) -#define COS0_5 FIXR(0.58293496820613387367) -#define COS0_6 FIXR(0.62250412303566481615) -#define COS0_7 FIXR(0.67480834145500574602) -#define COS0_8 FIXR(0.74453627100229844977) -#define COS0_9 FIXR(0.83934964541552703873) -#define COS0_10 FIXR(0.97256823786196069369) -#define COS0_11 FIXR(1.16943993343288495515) -#define COS0_12 FIXR(1.48416461631416627724) -#define COS0_13 FIXR(2.05778100995341155085) -#define COS0_14 FIXR(3.40760841846871878570) -#define COS0_15 FIXR(10.19000812354805681150) +#define COS0_0 FIXHR(0.50060299823519630134/2) +#define COS0_1 FIXHR(0.50547095989754365998/2) +#define COS0_2 FIXHR(0.51544730992262454697/2) +#define COS0_3 FIXHR(0.53104259108978417447/2) +#define COS0_4 FIXHR(0.55310389603444452782/2) +#define COS0_5 FIXHR(0.58293496820613387367/2) +#define COS0_6 FIXHR(0.62250412303566481615/2) +#define COS0_7 FIXHR(0.67480834145500574602/2) +#define COS0_8 FIXHR(0.74453627100229844977/2) +#define COS0_9 FIXHR(0.83934964541552703873/2) +#define COS0_10 FIXHR(0.97256823786196069369/2) +#define COS0_11 FIXHR(1.16943993343288495515/4) +#define COS0_12 FIXHR(1.48416461631416627724/4) +#define COS0_13 FIXHR(2.05778100995341155085/8) +#define COS0_14 FIXHR(3.40760841846871878570/8) +#define COS0_15 FIXHR(10.19000812354805681150/32) -#define COS1_0 FIXR(0.50241928618815570551) -#define COS1_1 FIXR(0.52249861493968888062) -#define COS1_2 FIXR(0.56694403481635770368) -#define COS1_3 FIXR(0.64682178335999012954) -#define COS1_4 FIXR(0.78815462345125022473) -#define COS1_5 FIXR(1.06067768599034747134) -#define COS1_6 FIXR(1.72244709823833392782) -#define COS1_7 FIXR(5.10114861868916385802) +#define COS1_0 FIXHR(0.50241928618815570551/2) +#define COS1_1 FIXHR(0.52249861493968888062/2) +#define COS1_2 
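
/*
 * Illustration only: why several cosine constants above are stored
 * pre-divided (.../4, /8, /32).  FIXHR() (assumed here as in this file:
 * (int)((x) * (1LL<<32) + 0.5)) can only hold magnitudes below 1.0, so a
 * factor c >= 1 is stored as c/2^s and the operand is pre-shifted by s
 * instead -- that is the extra shift argument passed to BF() below, e.g.
 * BF(15, 16, COS0_15, 5) for COS0_15 ~= 10.19 stored as FIXHR(10.19/32).
 * Needs <stdint.h>.
 */
static inline int mulh_sketch(int a, int b)
{
    return (int)(((int64_t)a * b) >> 32);              /* FFmpeg's MULH */
}

static int scale_by_10_19(int v)            /* v must have 5 spare bits */
{
    return mulh_sketch(v << 5,
        (int)((10.19000812354805681150 / 32) * (1LL << 32) + 0.5));
}
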
FIXHR(0.56694403481635770368/2) +#define COS1_3 FIXHR(0.64682178335999012954/2) +#define COS1_4 FIXHR(0.78815462345125022473/2) +#define COS1_5 FIXHR(1.06067768599034747134/4) +#define COS1_6 FIXHR(1.72244709823833392782/4) +#define COS1_7 FIXHR(5.10114861868916385802/16) -#define COS2_0 FIXR(0.50979557910415916894) -#define COS2_1 FIXR(0.60134488693504528054) -#define COS2_2 FIXR(0.89997622313641570463) -#define COS2_3 FIXR(2.56291544774150617881) +#define COS2_0 FIXHR(0.50979557910415916894/2) +#define COS2_1 FIXHR(0.60134488693504528054/2) +#define COS2_2 FIXHR(0.89997622313641570463/2) +#define COS2_3 FIXHR(2.56291544774150617881/8) -#define COS3_0 FIXR(0.54119610014619698439) -#define COS3_1 FIXR(1.30656296487637652785) +#define COS3_0 FIXHR(0.54119610014619698439/2) +#define COS3_1 FIXHR(1.30656296487637652785/4) -#define COS4_0 FIXR(0.70710678118654752439) +#define COS4_0 FIXHR(0.70710678118654752439/2) /* butterfly operator */ -#define BF(a, b, c)\ +#define BF(a, b, c, s)\ {\ tmp0 = tab[a] + tab[b];\ tmp1 = tab[a] - tab[b];\ tab[a] = tmp0;\ - tab[b] = MULL(tmp1, c);\ + tab[b] = MULH(tmp1<<(s), c);\ } #define BF1(a, b, c, d)\ {\ - BF(a, b, COS4_0);\ - BF(c, d, -COS4_0);\ + BF(a, b, COS4_0, 1);\ + BF(c, d,-COS4_0, 1);\ tab[c] += tab[d];\ } #define BF2(a, b, c, d)\ {\ - BF(a, b, COS4_0);\ - BF(c, d, -COS4_0);\ + BF(a, b, COS4_0, 1);\ + BF(c, d,-COS4_0, 1);\ tab[c] += tab[d];\ tab[a] += tab[c];\ tab[c] += tab[b];\ @@ -608,100 +620,108 @@ static void dct32(int32_t *out, int32_t *tab) int tmp0, tmp1; /* pass 1 */ - BF(0, 31, COS0_0); - BF(1, 30, COS0_1); - BF(2, 29, COS0_2); - BF(3, 28, COS0_3); - BF(4, 27, COS0_4); - BF(5, 26, COS0_5); - BF(6, 25, COS0_6); - BF(7, 24, COS0_7); - BF(8, 23, COS0_8); - BF(9, 22, COS0_9); - BF(10, 21, COS0_10); - BF(11, 20, COS0_11); - BF(12, 19, COS0_12); - BF(13, 18, COS0_13); - BF(14, 17, COS0_14); - BF(15, 16, COS0_15); - + BF( 0, 31, COS0_0 , 1); + BF(15, 16, COS0_15, 5); /* pass 2 */ - BF(0, 15, COS1_0); - BF(1, 14, COS1_1); - BF(2, 13, COS1_2); - BF(3, 12, COS1_3); - BF(4, 11, COS1_4); - BF(5, 10, COS1_5); - BF(6, 9, COS1_6); - BF(7, 8, COS1_7); - - BF(16, 31, -COS1_0); - BF(17, 30, -COS1_1); - BF(18, 29, -COS1_2); - BF(19, 28, -COS1_3); - BF(20, 27, -COS1_4); - BF(21, 26, -COS1_5); - BF(22, 25, -COS1_6); - BF(23, 24, -COS1_7); - + BF( 0, 15, COS1_0 , 1); + BF(16, 31,-COS1_0 , 1); + /* pass 1 */ + BF( 7, 24, COS0_7 , 1); + BF( 8, 23, COS0_8 , 1); + /* pass 2 */ + BF( 7, 8, COS1_7 , 4); + BF(23, 24,-COS1_7 , 4); /* pass 3 */ - BF(0, 7, COS2_0); - BF(1, 6, COS2_1); - BF(2, 5, COS2_2); - BF(3, 4, COS2_3); - - BF(8, 15, -COS2_0); - BF(9, 14, -COS2_1); - BF(10, 13, -COS2_2); - BF(11, 12, -COS2_3); - - BF(16, 23, COS2_0); - BF(17, 22, COS2_1); - BF(18, 21, COS2_2); - BF(19, 20, COS2_3); - - BF(24, 31, -COS2_0); - BF(25, 30, -COS2_1); - BF(26, 29, -COS2_2); - BF(27, 28, -COS2_3); - + BF( 0, 7, COS2_0 , 1); + BF( 8, 15,-COS2_0 , 1); + BF(16, 23, COS2_0 , 1); + BF(24, 31,-COS2_0 , 1); + /* pass 1 */ + BF( 3, 28, COS0_3 , 1); + BF(12, 19, COS0_12, 2); + /* pass 2 */ + BF( 3, 12, COS1_3 , 1); + BF(19, 28,-COS1_3 , 1); + /* pass 1 */ + BF( 4, 27, COS0_4 , 1); + BF(11, 20, COS0_11, 2); + /* pass 2 */ + BF( 4, 11, COS1_4 , 1); + BF(20, 27,-COS1_4 , 1); + /* pass 3 */ + BF( 3, 4, COS2_3 , 3); + BF(11, 12,-COS2_3 , 3); + BF(19, 20, COS2_3 , 3); + BF(27, 28,-COS2_3 , 3); /* pass 4 */ - BF(0, 3, COS3_0); - BF(1, 2, COS3_1); - - BF(4, 7, -COS3_0); - BF(5, 6, -COS3_1); - - BF(8, 11, COS3_0); - BF(9, 10, COS3_1); - - BF(12, 15, -COS3_0); - BF(13, 14, -COS3_1); - - BF(16, 
19, COS3_0); - BF(17, 18, COS3_1); - - BF(20, 23, -COS3_0); - BF(21, 22, -COS3_1); - - BF(24, 27, COS3_0); - BF(25, 26, COS3_1); - - BF(28, 31, -COS3_0); - BF(29, 30, -COS3_1); - + BF( 0, 3, COS3_0 , 1); + BF( 4, 7,-COS3_0 , 1); + BF( 8, 11, COS3_0 , 1); + BF(12, 15,-COS3_0 , 1); + BF(16, 19, COS3_0 , 1); + BF(20, 23,-COS3_0 , 1); + BF(24, 27, COS3_0 , 1); + BF(28, 31,-COS3_0 , 1); + + + + /* pass 1 */ + BF( 1, 30, COS0_1 , 1); + BF(14, 17, COS0_14, 3); + /* pass 2 */ + BF( 1, 14, COS1_1 , 1); + BF(17, 30,-COS1_1 , 1); + /* pass 1 */ + BF( 6, 25, COS0_6 , 1); + BF( 9, 22, COS0_9 , 1); + /* pass 2 */ + BF( 6, 9, COS1_6 , 2); + BF(22, 25,-COS1_6 , 2); + /* pass 3 */ + BF( 1, 6, COS2_1 , 1); + BF( 9, 14,-COS2_1 , 1); + BF(17, 22, COS2_1 , 1); + BF(25, 30,-COS2_1 , 1); + + /* pass 1 */ + BF( 2, 29, COS0_2 , 1); + BF(13, 18, COS0_13, 3); + /* pass 2 */ + BF( 2, 13, COS1_2 , 1); + BF(18, 29,-COS1_2 , 1); + /* pass 1 */ + BF( 5, 26, COS0_5 , 1); + BF(10, 21, COS0_10, 1); + /* pass 2 */ + BF( 5, 10, COS1_5 , 2); + BF(21, 26,-COS1_5 , 2); + /* pass 3 */ + BF( 2, 5, COS2_2 , 1); + BF(10, 13,-COS2_2 , 1); + BF(18, 21, COS2_2 , 1); + BF(26, 29,-COS2_2 , 1); + /* pass 4 */ + BF( 1, 2, COS3_1 , 2); + BF( 5, 6,-COS3_1 , 2); + BF( 9, 10, COS3_1 , 2); + BF(13, 14,-COS3_1 , 2); + BF(17, 18, COS3_1 , 2); + BF(21, 22,-COS3_1 , 2); + BF(25, 26, COS3_1 , 2); + BF(29, 30,-COS3_1 , 2); + /* pass 5 */ - BF1(0, 1, 2, 3); - BF2(4, 5, 6, 7); - BF1(8, 9, 10, 11); + BF1( 0, 1, 2, 3); + BF2( 4, 5, 6, 7); + BF1( 8, 9, 10, 11); BF2(12, 13, 14, 15); BF1(16, 17, 18, 19); BF2(20, 21, 22, 23); BF1(24, 25, 26, 27); BF2(28, 29, 30, 31); - + /* pass 6 */ - + ADD( 8, 12); ADD(12, 10); ADD(10, 14); @@ -726,7 +746,7 @@ static void dct32(int32_t *out, int32_t *tab) out[22] = tab[13]; out[14] = tab[14]; out[30] = tab[15]; - + ADD(24, 28); ADD(28, 26); ADD(26, 30); @@ -753,132 +773,137 @@ static void dct32(int32_t *out, int32_t *tab) out[31] = tab[31]; } -#define OUT_SHIFT (WFRAC_BITS + FRAC_BITS - 15) - #if FRAC_BITS <= 15 -static inline int round_sample(int sum) +static inline int round_sample(int *sum) { int sum1; - sum1 = (sum + (1 << (OUT_SHIFT - 1))) >> OUT_SHIFT; - if (sum1 < -32768) - sum1 = -32768; - else if (sum1 > 32767) - sum1 = 32767; + sum1 = (*sum) >> OUT_SHIFT; + *sum &= (1< OUT_MAX) + sum1 = OUT_MAX; return sum1; } -#if defined(ARCH_POWERPC_405) - /* signed 16x16 -> 32 multiply add accumulate */ -#define MACS(rt, ra, rb) \ - asm ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb)); +#define MACS(rt, ra, rb) MAC16(rt, ra, rb) /* signed 16x16 -> 32 multiply */ -#define MULS(ra, rb) \ - ({ int __rt; asm ("mullhw %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); __rt; }) +#define MULS(ra, rb) MUL16(ra, rb) + +#define MLSS(rt, ra, rb) MLS16(rt, ra, rb) #else -/* signed 16x16 -> 32 multiply add accumulate */ -#define MACS(rt, ra, rb) rt += (ra) * (rb) - -/* signed 16x16 -> 32 multiply */ -#define MULS(ra, rb) ((ra) * (rb)) - -#endif - -#else - -static inline int round_sample(int64_t sum) +static inline int round_sample(int64_t *sum) { int sum1; - sum1 = (int)((sum + (int64_t_C(1) << (OUT_SHIFT - 1))) >> OUT_SHIFT); - if (sum1 < -32768) - sum1 = -32768; - else if (sum1 > 32767) - sum1 = 32767; + sum1 = (int)((*sum) >> OUT_SHIFT); + *sum &= (1< OUT_MAX) + sum1 = OUT_MAX; return sum1; } -#define MULS(ra, rb) MUL64(ra, rb) - +# define MULS(ra, rb) MUL64(ra, rb) +# define MACS(rt, ra, rb) MAC64(rt, ra, rb) +# define MLSS(rt, ra, rb) MLS64(rt, ra, rb) #endif -#define SUM8(sum, op, w, p) \ -{ \ - sum op MULS((w)[0 * 
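
/*
 * Illustration only: a readable restatement of the new round_sample()
 * above.  The fractional bits that used to be discarded are now kept in
 * *sum, which the synthesis filter seeds from s->dither_state and stores
 * back, i.e. first-order error feedback on the rounding error.
 * OUT_SHIFT / OUT_MIN / OUT_MAX are assumed as defined in mpegaudio.h.
 */
static inline int round_sample_sketch(int *sum)
{
    int out = *sum >> OUT_SHIFT;        /* integer part of the accumulator  */
    *sum &= (1 << OUT_SHIFT) - 1;       /* carry the remainder to next call */
    if (out < OUT_MIN)
        out = OUT_MIN;
    else if (out > OUT_MAX)
        out = OUT_MAX;
    return out;
}
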
64], p[0 * 64]);\ - sum op MULS((w)[1 * 64], p[1 * 64]);\ - sum op MULS((w)[2 * 64], p[2 * 64]);\ - sum op MULS((w)[3 * 64], p[3 * 64]);\ - sum op MULS((w)[4 * 64], p[4 * 64]);\ - sum op MULS((w)[5 * 64], p[5 * 64]);\ - sum op MULS((w)[6 * 64], p[6 * 64]);\ - sum op MULS((w)[7 * 64], p[7 * 64]);\ +#define SUM8(op, sum, w, p) \ +{ \ + op(sum, (w)[0 * 64], p[0 * 64]); \ + op(sum, (w)[1 * 64], p[1 * 64]); \ + op(sum, (w)[2 * 64], p[2 * 64]); \ + op(sum, (w)[3 * 64], p[3 * 64]); \ + op(sum, (w)[4 * 64], p[4 * 64]); \ + op(sum, (w)[5 * 64], p[5 * 64]); \ + op(sum, (w)[6 * 64], p[6 * 64]); \ + op(sum, (w)[7 * 64], p[7 * 64]); \ } #define SUM8P2(sum1, op1, sum2, op2, w1, w2, p) \ { \ int tmp;\ tmp = p[0 * 64];\ - sum1 op1 MULS((w1)[0 * 64], tmp);\ - sum2 op2 MULS((w2)[0 * 64], tmp);\ + op1(sum1, (w1)[0 * 64], tmp);\ + op2(sum2, (w2)[0 * 64], tmp);\ tmp = p[1 * 64];\ - sum1 op1 MULS((w1)[1 * 64], tmp);\ - sum2 op2 MULS((w2)[1 * 64], tmp);\ + op1(sum1, (w1)[1 * 64], tmp);\ + op2(sum2, (w2)[1 * 64], tmp);\ tmp = p[2 * 64];\ - sum1 op1 MULS((w1)[2 * 64], tmp);\ - sum2 op2 MULS((w2)[2 * 64], tmp);\ + op1(sum1, (w1)[2 * 64], tmp);\ + op2(sum2, (w2)[2 * 64], tmp);\ tmp = p[3 * 64];\ - sum1 op1 MULS((w1)[3 * 64], tmp);\ - sum2 op2 MULS((w2)[3 * 64], tmp);\ + op1(sum1, (w1)[3 * 64], tmp);\ + op2(sum2, (w2)[3 * 64], tmp);\ tmp = p[4 * 64];\ - sum1 op1 MULS((w1)[4 * 64], tmp);\ - sum2 op2 MULS((w2)[4 * 64], tmp);\ + op1(sum1, (w1)[4 * 64], tmp);\ + op2(sum2, (w2)[4 * 64], tmp);\ tmp = p[5 * 64];\ - sum1 op1 MULS((w1)[5 * 64], tmp);\ - sum2 op2 MULS((w2)[5 * 64], tmp);\ + op1(sum1, (w1)[5 * 64], tmp);\ + op2(sum2, (w2)[5 * 64], tmp);\ tmp = p[6 * 64];\ - sum1 op1 MULS((w1)[6 * 64], tmp);\ - sum2 op2 MULS((w2)[6 * 64], tmp);\ + op1(sum1, (w1)[6 * 64], tmp);\ + op2(sum2, (w2)[6 * 64], tmp);\ tmp = p[7 * 64];\ - sum1 op1 MULS((w1)[7 * 64], tmp);\ - sum2 op2 MULS((w2)[7 * 64], tmp);\ + op1(sum1, (w1)[7 * 64], tmp);\ + op2(sum2, (w2)[7 * 64], tmp);\ } +void ff_mpa_synth_init(MPA_INT *window) +{ + int i; + + /* max = 18760, max sum over all 16 coefs : 44736 */ + for(i=0;i<257;i++) { + int v; + v = ff_mpa_enwindow[i]; +#if WFRAC_BITS < 16 + v = (v + (1 << (16 - WFRAC_BITS - 1))) >> (16 - WFRAC_BITS); +#endif + window[i] = v; + if ((i & 63) != 0) + v = -v; + if (i != 0) + window[512 - i] = v; + } +} /* 32 sub band synthesis filter. Input: 32 sub band samples, Output: 32 samples. 
*/ /* XXX: optimize by avoiding ring buffer usage */ -static void synth_filter(MPADecodeContext *s1, - int ch, int16_t *samples, int incr, +void ff_mpa_synth_filter(MPA_INT *synth_buf_ptr, int *synth_buf_offset, + MPA_INT *window, int *dither_state, + OUT_INT *samples, int incr, int32_t sb_samples[SBLIMIT]) { int32_t tmp[32]; register MPA_INT *synth_buf; - const register MPA_INT *w, *w2, *p; + register const MPA_INT *w, *w2, *p; int j, offset, v; - int16_t *samples2; + OUT_INT *samples2; #if FRAC_BITS <= 15 int sum, sum2; #else int64_t sum, sum2; #endif - + dct32(tmp, sb_samples); - - offset = s1->synth_buf_offset[ch]; - synth_buf = s1->synth_buf[ch] + offset; + + offset = *synth_buf_offset; + synth_buf = synth_buf_ptr + offset; for(j=0;j<32;j++) { v = tmp[j]; #if FRAC_BITS <= 15 /* NOTE: can cause a loss in precision if very high amplitude sound */ - if (v > 32767) - v = 32767; - else if (v < -32768) - v = -32768; + v = av_clip_int16(v); #endif synth_buf[j] = v; } @@ -889,148 +914,129 @@ static void synth_filter(MPADecodeContext *s1, w = window; w2 = window + 31; - sum = 0; + sum = *dither_state; p = synth_buf + 16; - SUM8(sum, +=, w, p); + SUM8(MACS, sum, w, p); p = synth_buf + 48; - SUM8(sum, -=, w + 32, p); - *samples = round_sample(sum); + SUM8(MLSS, sum, w + 32, p); + *samples = round_sample(&sum); samples += incr; w++; /* we calculate two samples at the same time to avoid one memory access per two sample */ for(j=1;j<16;j++) { - sum = 0; sum2 = 0; p = synth_buf + 16 + j; - SUM8P2(sum, +=, sum2, -=, w, w2, p); + SUM8P2(sum, MACS, sum2, MLSS, w, w2, p); p = synth_buf + 48 - j; - SUM8P2(sum, -=, sum2, -=, w + 32, w2 + 32, p); + SUM8P2(sum, MLSS, sum2, MLSS, w + 32, w2 + 32, p); - *samples = round_sample(sum); + *samples = round_sample(&sum); samples += incr; - *samples2 = round_sample(sum2); + sum += sum2; + *samples2 = round_sample(&sum); samples2 -= incr; w++; w2--; } - + p = synth_buf + 32; - sum = 0; - SUM8(sum, -=, w + 32, p); - *samples = round_sample(sum); + SUM8(MLSS, sum, w + 32, p); + *samples = round_sample(&sum); + *dither_state= sum; offset = (offset - 32) & 511; - s1->synth_buf_offset[ch] = offset; + *synth_buf_offset = offset; } -/* cos(pi*i/24) */ -#define C1 FIXR(0.99144486137381041114) -#define C3 FIXR(0.92387953251128675612) -#define C5 FIXR(0.79335334029123516458) -#define C7 FIXR(0.60876142900872063941) -#define C9 FIXR(0.38268343236508977173) -#define C11 FIXR(0.13052619222005159154) +#define C3 FIXHR(0.86602540378443864676/2) + +/* 0.5 / cos(pi*(2*i+1)/36) */ +static const int icos36[9] = { + FIXR(0.50190991877167369479), + FIXR(0.51763809020504152469), //0 + FIXR(0.55168895948124587824), + FIXR(0.61038729438072803416), + FIXR(0.70710678118654752439), //1 + FIXR(0.87172339781054900991), + FIXR(1.18310079157624925896), + FIXR(1.93185165257813657349), //2 + FIXR(5.73685662283492756461), +}; + +/* 0.5 / cos(pi*(2*i+1)/36) */ +static const int icos36h[9] = { + FIXHR(0.50190991877167369479/2), + FIXHR(0.51763809020504152469/2), //0 + FIXHR(0.55168895948124587824/2), + FIXHR(0.61038729438072803416/2), + FIXHR(0.70710678118654752439/2), //1 + FIXHR(0.87172339781054900991/2), + FIXHR(1.18310079157624925896/4), + FIXHR(1.93185165257813657349/4), //2 +// FIXHR(5.73685662283492756461), +}; /* 12 points IMDCT. We compute it "by hand" by factorizing obvious cases. 
*/ static void imdct12(int *out, int *in) { - int tmp; - int64_t in1_3, in1_9, in4_3, in4_9; + int in0, in1, in2, in3, in4, in5, t1, t2; - in1_3 = MUL64(in[1], C3); - in1_9 = MUL64(in[1], C9); - in4_3 = MUL64(in[4], C3); - in4_9 = MUL64(in[4], C9); - - tmp = FRAC_RND(MUL64(in[0], C7) - in1_3 - MUL64(in[2], C11) + - MUL64(in[3], C1) - in4_9 - MUL64(in[5], C5)); - out[0] = tmp; - out[5] = -tmp; - tmp = FRAC_RND(MUL64(in[0] - in[3], C9) - in1_3 + - MUL64(in[2] + in[5], C3) - in4_9); - out[1] = tmp; - out[4] = -tmp; - tmp = FRAC_RND(MUL64(in[0], C11) - in1_9 + MUL64(in[2], C7) - - MUL64(in[3], C5) + in4_3 - MUL64(in[5], C1)); - out[2] = tmp; - out[3] = -tmp; - tmp = FRAC_RND(MUL64(-in[0], C5) + in1_9 + MUL64(in[2], C1) + - MUL64(in[3], C11) - in4_3 - MUL64(in[5], C7)); - out[6] = tmp; - out[11] = tmp; - tmp = FRAC_RND(MUL64(-in[0] + in[3], C3) - in1_9 + - MUL64(in[2] + in[5], C9) + in4_3); - out[7] = tmp; - out[10] = tmp; - tmp = FRAC_RND(-MUL64(in[0], C1) - in1_3 - MUL64(in[2], C5) - - MUL64(in[3], C7) - in4_9 - MUL64(in[5], C11)); - out[8] = tmp; - out[9] = tmp; + in0= in[0*3]; + in1= in[1*3] + in[0*3]; + in2= in[2*3] + in[1*3]; + in3= in[3*3] + in[2*3]; + in4= in[4*3] + in[3*3]; + in5= in[5*3] + in[4*3]; + in5 += in3; + in3 += in1; + + in2= MULH(2*in2, C3); + in3= MULH(4*in3, C3); + + t1 = in0 - in4; + t2 = MULH(2*(in1 - in5), icos36h[4]); + + out[ 7]= + out[10]= t1 + t2; + out[ 1]= + out[ 4]= t1 - t2; + + in0 += in4>>1; + in4 = in0 + in2; + in5 += 2*in1; + in1 = MULH(in5 + in3, icos36h[1]); + out[ 8]= + out[ 9]= in4 + in1; + out[ 2]= + out[ 3]= in4 - in1; + + in0 -= in2; + in5 = MULH(2*(in5 - in3), icos36h[7]); + out[ 0]= + out[ 5]= in0 - in5; + out[ 6]= + out[11]= in0 + in5; } -#undef C1 -#undef C3 -#undef C5 -#undef C7 -#undef C9 -#undef C11 - /* cos(pi*i/18) */ -#define C1 FIXR(0.98480775301220805936) -#define C2 FIXR(0.93969262078590838405) -#define C3 FIXR(0.86602540378443864676) -#define C4 FIXR(0.76604444311897803520) -#define C5 FIXR(0.64278760968653932632) -#define C6 FIXR(0.5) -#define C7 FIXR(0.34202014332566873304) -#define C8 FIXR(0.17364817766693034885) +#define C1 FIXHR(0.98480775301220805936/2) +#define C2 FIXHR(0.93969262078590838405/2) +#define C3 FIXHR(0.86602540378443864676/2) +#define C4 FIXHR(0.76604444311897803520/2) +#define C5 FIXHR(0.64278760968653932632/2) +#define C6 FIXHR(0.5/2) +#define C7 FIXHR(0.34202014332566873304/2) +#define C8 FIXHR(0.17364817766693034885/2) -/* 0.5 / cos(pi*(2*i+1)/36) */ -static const int icos36[9] = { - FIXR(0.50190991877167369479), - FIXR(0.51763809020504152469), - FIXR(0.55168895948124587824), - FIXR(0.61038729438072803416), - FIXR(0.70710678118654752439), - FIXR(0.87172339781054900991), - FIXR(1.18310079157624925896), - FIXR(1.93185165257813657349), - FIXR(5.73685662283492756461), -}; - -static const int icos72[18] = { - /* 0.5 / cos(pi*(2*i+19)/72) */ - FIXR(0.74009361646113053152), - FIXR(0.82133981585229078570), - FIXR(0.93057949835178895673), - FIXR(1.08284028510010010928), - FIXR(1.30656296487637652785), - FIXR(1.66275476171152078719), - FIXR(2.31011315767264929558), - FIXR(3.83064878777019433457), - FIXR(11.46279281302667383546), - - /* 0.5 / cos(pi*(2*(i + 18) +19)/72) */ - FIXR(-0.67817085245462840086), - FIXR(-0.63023620700513223342), - FIXR(-0.59284452371708034528), - FIXR(-0.56369097343317117734), - FIXR(-0.54119610014619698439), - FIXR(-0.52426456257040533932), - FIXR(-0.51213975715725461845), - FIXR(-0.50431448029007636036), - FIXR(-0.50047634258165998492), -}; /* using Lee like decomposition followed by hand coded 9 
points DCT */ -static void imdct36(int *out, int *in) +static void imdct36(int *out, int *buf, int *in, int *win) { int i, j, t0, t1, t2, t3, s0, s1, s2, s3; int tmp[18], *tmp1, *in1; - int64_t in3_3, in6_6; for(i=17;i>=1;i--) in[i] += in[i-1]; @@ -1040,30 +1046,61 @@ static void imdct36(int *out, int *in) for(j=0;j<2;j++) { tmp1 = tmp + j; in1 = in + j; +#if 0 +//more accurate but slower + int64_t t0, t1, t2, t3; + t2 = in1[2*4] + in1[2*8] - in1[2*2]; - in3_3 = MUL64(in1[2*3], C3); - in6_6 = MUL64(in1[2*6], C6); + t3 = (in1[2*0] + (int64_t)(in1[2*6]>>1))<<32; + t1 = in1[2*0] - in1[2*6]; + tmp1[ 6] = t1 - (t2>>1); + tmp1[16] = t1 + t2; - tmp1[0] = FRAC_RND(MUL64(in1[2*1], C1) + in3_3 + - MUL64(in1[2*5], C5) + MUL64(in1[2*7], C7)); - tmp1[2] = in1[2*0] + FRAC_RND(MUL64(in1[2*2], C2) + - MUL64(in1[2*4], C4) + in6_6 + - MUL64(in1[2*8], C8)); - tmp1[4] = FRAC_RND(MUL64(in1[2*1] - in1[2*5] - in1[2*7], C3)); - tmp1[6] = FRAC_RND(MUL64(in1[2*2] - in1[2*4] - in1[2*8], C6)) - - in1[2*6] + in1[2*0]; - tmp1[8] = FRAC_RND(MUL64(in1[2*1], C5) - in3_3 - - MUL64(in1[2*5], C7) + MUL64(in1[2*7], C1)); - tmp1[10] = in1[2*0] + FRAC_RND(MUL64(-in1[2*2], C8) - - MUL64(in1[2*4], C2) + in6_6 + - MUL64(in1[2*8], C4)); - tmp1[12] = FRAC_RND(MUL64(in1[2*1], C7) - in3_3 + - MUL64(in1[2*5], C1) - - MUL64(in1[2*7], C5)); - tmp1[14] = in1[2*0] + FRAC_RND(MUL64(-in1[2*2], C4) + - MUL64(in1[2*4], C8) + in6_6 - - MUL64(in1[2*8], C2)); - tmp1[16] = in1[2*0] - in1[2*2] + in1[2*4] - in1[2*6] + in1[2*8]; + t0 = MUL64(2*(in1[2*2] + in1[2*4]), C2); + t1 = MUL64( in1[2*4] - in1[2*8] , -2*C8); + t2 = MUL64(2*(in1[2*2] + in1[2*8]), -C4); + + tmp1[10] = (t3 - t0 - t2) >> 32; + tmp1[ 2] = (t3 + t0 + t1) >> 32; + tmp1[14] = (t3 + t2 - t1) >> 32; + + tmp1[ 4] = MULH(2*(in1[2*5] + in1[2*7] - in1[2*1]), -C3); + t2 = MUL64(2*(in1[2*1] + in1[2*5]), C1); + t3 = MUL64( in1[2*5] - in1[2*7] , -2*C7); + t0 = MUL64(2*in1[2*3], C3); + + t1 = MUL64(2*(in1[2*1] + in1[2*7]), -C5); + + tmp1[ 0] = (t2 + t3 + t0) >> 32; + tmp1[12] = (t2 + t1 - t0) >> 32; + tmp1[ 8] = (t3 - t1 - t0) >> 32; +#else + t2 = in1[2*4] + in1[2*8] - in1[2*2]; + + t3 = in1[2*0] + (in1[2*6]>>1); + t1 = in1[2*0] - in1[2*6]; + tmp1[ 6] = t1 - (t2>>1); + tmp1[16] = t1 + t2; + + t0 = MULH(2*(in1[2*2] + in1[2*4]), C2); + t1 = MULH( in1[2*4] - in1[2*8] , -2*C8); + t2 = MULH(2*(in1[2*2] + in1[2*8]), -C4); + + tmp1[10] = t3 - t0 - t2; + tmp1[ 2] = t3 + t0 + t1; + tmp1[14] = t3 + t2 - t1; + + tmp1[ 4] = MULH(2*(in1[2*5] + in1[2*7] - in1[2*1]), -C3); + t2 = MULH(2*(in1[2*1] + in1[2*5]), C1); + t3 = MULH( in1[2*5] - in1[2*7] , -2*C7); + t0 = MULH(2*in1[2*3], C3); + + t1 = MULH(2*(in1[2*1] + in1[2*7]), -C5); + + tmp1[ 0] = t2 + t3 + t0; + tmp1[12] = t2 + t1 - t0; + tmp1[ 8] = t3 - t1 - t0; +#endif } i = 0; @@ -1075,191 +1112,33 @@ static void imdct36(int *out, int *in) t2 = tmp[i + 1]; t3 = tmp[i + 3]; - s1 = MULL(t3 + t2, icos36[j]); + s1 = MULH(2*(t3 + t2), icos36h[j]); s3 = MULL(t3 - t2, icos36[8 - j]); - - t0 = MULL(s0 + s1, icos72[9 + 8 - j]); - t1 = MULL(s0 - s1, icos72[8 - j]); - out[18 + 9 + j] = t0; - out[18 + 8 - j] = t0; - out[9 + j] = -t1; - out[8 - j] = t1; - - t0 = MULL(s2 + s3, icos72[9+j]); - t1 = MULL(s2 - s3, icos72[j]); - out[18 + 9 + (8 - j)] = t0; - out[18 + j] = t0; - out[9 + (8 - j)] = -t1; - out[j] = t1; + + t0 = s0 + s1; + t1 = s0 - s1; + out[(9 + j)*SBLIMIT] = MULH(t1, win[9 + j]) + buf[9 + j]; + out[(8 - j)*SBLIMIT] = MULH(t1, win[8 - j]) + buf[8 - j]; + buf[9 + j] = MULH(t0, win[18 + 9 + j]); + buf[8 - j] = MULH(t0, win[18 + 8 - j]); + + t0 = s2 + s3; + t1 = s2 - 
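
/*
 * Illustration only: the window + overlap-add step that imdct36() now does
 * in place (the MULH(t, win[...]) + buf[...] lines above), shown for an
 * already-windowed 36-sample IMDCT result x[].  out is strided by SBLIMIT
 * because the 18 output lines of each subband are interleaved.
 * Needs <stdint.h>.
 */
static void overlap_add_sketch(int32_t *out, int32_t *buf,
                               const int32_t x[36], int out_stride)
{
    int i;
    for (i = 0; i < 18; i++) {
        out[i * out_stride] = x[i] + buf[i]; /* first half + previous tail */
        buf[i]              = x[i + 18];     /* keep tail for next granule */
    }
}
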
s3; + out[(9 + 8 - j)*SBLIMIT] = MULH(t1, win[9 + 8 - j]) + buf[9 + 8 - j]; + out[( j)*SBLIMIT] = MULH(t1, win[ j]) + buf[ j]; + buf[9 + 8 - j] = MULH(t0, win[18 + 9 + 8 - j]); + buf[ + j] = MULH(t0, win[18 + j]); i += 4; } s0 = tmp[16]; - s1 = MULL(tmp[17], icos36[4]); - t0 = MULL(s0 + s1, icos72[9 + 4]); - t1 = MULL(s0 - s1, icos72[4]); - out[18 + 9 + 4] = t0; - out[18 + 8 - 4] = t0; - out[9 + 4] = -t1; - out[8 - 4] = t1; -} - -/* fast header check for resync */ -static int check_header(uint32_t header) -{ - /* header */ - if ((header & 0xffe00000) != 0xffe00000) - return -1; - /* layer check */ - if (((header >> 17) & 3) == 0) - return -1; - /* bit rate */ - if (((header >> 12) & 0xf) == 0xf) - return -1; - /* frequency */ - if (((header >> 10) & 3) == 3) - return -1; - return 0; -} - -/* header + layer + bitrate + freq + lsf/mpeg25 */ -#define SAME_HEADER_MASK \ - (0xffe00000 | (3 << 17) | (0xf << 12) | (3 << 10) | (3 << 19)) - -/* header decoding. MUST check the header before because no - consistency check is done there. Return 1 if free format found and - that the frame size must be computed externally */ -static int decode_header(MPADecodeContext *s, uint32_t header) -{ - int sample_rate, frame_size, mpeg25, padding; - int sample_rate_index, bitrate_index; - if (header & (1<<20)) { - s->lsf = (header & (1<<19)) ? 0 : 1; - mpeg25 = 0; - } else { - s->lsf = 1; - mpeg25 = 1; - } - - s->layer = 4 - ((header >> 17) & 3); - /* extract frequency */ - sample_rate_index = (header >> 10) & 3; - sample_rate = mpa_freq_tab[sample_rate_index] >> (s->lsf + mpeg25); - sample_rate_index += 3 * (s->lsf + mpeg25); - s->sample_rate_index = sample_rate_index; - s->error_protection = ((header >> 16) & 1) ^ 1; - s->sample_rate = sample_rate; - - bitrate_index = (header >> 12) & 0xf; - padding = (header >> 9) & 1; - //extension = (header >> 8) & 1; - s->mode = (header >> 6) & 3; - s->mode_ext = (header >> 4) & 3; - //copyright = (header >> 3) & 1; - //original = (header >> 2) & 1; - //emphasis = header & 3; - - if (s->mode == MPA_MONO) - s->nb_channels = 1; - else - s->nb_channels = 2; - - if (bitrate_index != 0) { - frame_size = mpa_bitrate_tab[s->lsf][s->layer - 1][bitrate_index]; - s->bit_rate = frame_size * 1000; - switch(s->layer) { - case 1: - frame_size = (frame_size * 12000) / sample_rate; - frame_size = (frame_size + padding) * 4; - break; - case 2: - frame_size = (frame_size * 144000) / sample_rate; - frame_size += padding; - break; - default: - case 3: - frame_size = (frame_size * 144000) / (sample_rate << s->lsf); - frame_size += padding; - break; - } - s->frame_size = frame_size; - } else { - /* if no frame size computed, signal it */ - if (!s->free_format_frame_size) - return 1; - /* free format: compute bitrate and real frame size from the - frame size we extracted by reading the bitstream */ - s->frame_size = s->free_format_frame_size; - switch(s->layer) { - case 1: - s->frame_size += padding * 4; - s->bit_rate = (s->frame_size * sample_rate) / 48000; - break; - case 2: - s->frame_size += padding; - s->bit_rate = (s->frame_size * sample_rate) / 144000; - break; - default: - case 3: - s->frame_size += padding; - s->bit_rate = (s->frame_size * (sample_rate << s->lsf)) / 144000; - break; - } - } - -#if defined(DEBUG) - printf("layer%d, %d Hz, %d kbits/s, ", - s->layer, s->sample_rate, s->bit_rate); - if (s->nb_channels == 2) { - if (s->layer == 3) { - if (s->mode_ext & MODE_EXT_MS_STEREO) - printf("ms-"); - if (s->mode_ext & MODE_EXT_I_STEREO) - printf("i-"); - } - printf("stereo"); - } else 
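
/*
 * Illustration only: the frame-size arithmetic of the decode_header()
 * removed above, kept as a compact reference (the patch replaces it with a
 * call to ff_mpegaudio_decode_header()).  bitrate is in kbit/s as stored in
 * mpa_bitrate_tab; the result is the coded frame size in bytes.
 */
static int coded_frame_size(int layer, int bitrate_kbps, int sample_rate,
                            int lsf, int padding)
{
    switch (layer) {
    case 1:
        return (bitrate_kbps * 12000 / sample_rate + padding) * 4;
    case 2:
        return bitrate_kbps * 144000 / sample_rate + padding;
    default: /* layer 3 */
        return bitrate_kbps * 144000 / (sample_rate << lsf) + padding;
    }
}
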
{ - printf("mono"); - } - printf("\n"); -#endif - return 0; -} - -/* useful helper to get mpeg audio stream infos. Return -1 if error in - header, otherwise the coded frame size in bytes */ -int mpa_decode_header(AVCodecContext *avctx, uint32_t head) -{ - MPADecodeContext s1, *s = &s1; - - if (check_header(head) != 0) - return -1; - - if (decode_header(s, head) != 0) { - return -1; - } - - switch(s->layer) { - case 1: - avctx->frame_size = 384; - break; - case 2: - avctx->frame_size = 1152; - break; - default: - case 3: - if (s->lsf) - avctx->frame_size = 576; - else - avctx->frame_size = 1152; - break; - } - - avctx->sample_rate = s->sample_rate; - avctx->channels = s->nb_channels; - avctx->bit_rate = s->bit_rate; - avctx->sub_id = s->layer; - return s->frame_size; + s1 = MULH(2*tmp[17], icos36h[4]); + t0 = s0 + s1; + t1 = s0 - s1; + out[(9 + 4)*SBLIMIT] = MULH(t1, win[9 + 4]) + buf[9 + 4]; + out[(8 - 4)*SBLIMIT] = MULH(t1, win[8 - 4]) + buf[8 - 4]; + buf[9 + 4] = MULH(t0, win[18 + 9 + 4]); + buf[8 - 4] = MULH(t0, win[18 + 8 - 4]); } /* return the number of decoded frames */ @@ -1269,7 +1148,7 @@ static int mp_decode_layer1(MPADecodeContext *s) uint8_t allocation[MPA_MAX_CHANNELS][SBLIMIT]; uint8_t scale_factors[MPA_MAX_CHANNELS][SBLIMIT]; - if (s->mode == MPA_JSTEREO) + if (s->mode == MPA_JSTEREO) bound = (s->mode_ext + 1) * 4; else bound = SBLIMIT; @@ -1297,7 +1176,7 @@ static int mp_decode_layer1(MPADecodeContext *s) scale_factors[1][i] = get_bits(&s->gb, 6); } } - + /* compute samples */ for(j=0;j<12;j++) { for(i=0;i= 56) || - (ch_bitrate >= 56 && ch_bitrate <= 80)) - table = 0; - else if (freq != 48000 && ch_bitrate >= 96) - table = 1; - else if (freq != 32000 && ch_bitrate <= 48) - table = 2; - else - table = 3; - } else { - table = 4; - } - return table; -} - static int mp_decode_layer2(MPADecodeContext *s) { int sblimit; /* number of used subbands */ @@ -1362,17 +1219,21 @@ static int mp_decode_layer2(MPADecodeContext *s) int scale, qindex, bits, steps, k, l, m, b; /* select decoding table */ - table = l2_select_table(s->bit_rate / 1000, s->nb_channels, + table = ff_mpa_l2_select_table(s->bit_rate / 1000, s->nb_channels, s->sample_rate, s->lsf); - sblimit = sblimit_table[table]; - alloc_table = alloc_tables[table]; + sblimit = ff_mpa_sblimit_table[table]; + alloc_table = ff_mpa_alloc_tables[table]; - if (s->mode == MPA_JSTEREO) + if (s->mode == MPA_JSTEREO) bound = (s->mode_ext + 1) * 4; else bound = sblimit; - dprintf("bound=%d sblimit=%d\n", bound, sblimit); + dprintf(s->avctx, "bound=%d sblimit=%d\n", bound, sblimit); + + /* sanity check */ + if( bound > sblimit ) bound = sblimit; + /* parse bit allocation */ j = 0; for(i=0;inb_channels;ch++) { for(i=0;iavctx, " %d", bit_alloc[ch][i]); + dprintf(s->avctx, "\n"); } } #endif @@ -1403,11 +1264,11 @@ static int mp_decode_layer2(MPADecodeContext *s) /* scale codes */ for(i=0;inb_channels;ch++) { - if (bit_alloc[ch][i]) + if (bit_alloc[ch][i]) scale_code[ch][i] = get_bits(&s->gb, 2); } } - + /* scale factors */ for(i=0;inb_channels;ch++) { @@ -1445,12 +1306,12 @@ static int mp_decode_layer2(MPADecodeContext *s) for(i=0;iavctx, " %d %d %d", sf[0], sf[1], sf[2]); } else { - printf(" -"); + dprintf(s->avctx, " -"); } } - printf("\n"); + dprintf(s->avctx, "\n"); } #endif @@ -1465,18 +1326,18 @@ static int mp_decode_layer2(MPADecodeContext *s) if (b) { scale = scale_factors[ch][i][k]; qindex = alloc_table[j+b]; - bits = quant_bits[qindex]; + bits = ff_mpa_quant_bits[qindex]; if (bits < 0) { /* 3 values at the same time */ v = 
get_bits(&s->gb, -bits); - steps = quant_steps[qindex]; - s->sb_samples[ch][k * 12 + l + 0][i] = + steps = ff_mpa_quant_steps[qindex]; + s->sb_samples[ch][k * 12 + l + 0][i] = l2_unscale_group(steps, v % steps, scale); v = v / steps; - s->sb_samples[ch][k * 12 + l + 1][i] = + s->sb_samples[ch][k * 12 + l + 1][i] = l2_unscale_group(steps, v % steps, scale); v = v / steps; - s->sb_samples[ch][k * 12 + l + 2][i] = + s->sb_samples[ch][k * 12 + l + 2][i] = l2_unscale_group(steps, v, scale); } else { for(m=0;m<3;m++) { @@ -1492,7 +1353,7 @@ static int mp_decode_layer2(MPADecodeContext *s) } } /* next subband in alloc table */ - j += 1 << bit_alloc_bits; + j += 1 << bit_alloc_bits; } /* XXX: find a way to avoid this duplication of code */ for(i=bound;igb, -bits); - steps = quant_steps[qindex]; + steps = ff_mpa_quant_steps[qindex]; mant = v % steps; v = v / steps; - s->sb_samples[0][k * 12 + l + 0][i] = + s->sb_samples[0][k * 12 + l + 0][i] = l2_unscale_group(steps, mant, scale0); - s->sb_samples[1][k * 12 + l + 0][i] = + s->sb_samples[1][k * 12 + l + 0][i] = l2_unscale_group(steps, mant, scale1); mant = v % steps; v = v / steps; - s->sb_samples[0][k * 12 + l + 1][i] = + s->sb_samples[0][k * 12 + l + 1][i] = l2_unscale_group(steps, mant, scale0); - s->sb_samples[1][k * 12 + l + 1][i] = + s->sb_samples[1][k * 12 + l + 1][i] = l2_unscale_group(steps, mant, scale1); - s->sb_samples[0][k * 12 + l + 2][i] = + s->sb_samples[0][k * 12 + l + 2][i] = l2_unscale_group(steps, v, scale0); - s->sb_samples[1][k * 12 + l + 2][i] = + s->sb_samples[1][k * 12 + l + 2][i] = l2_unscale_group(steps, v, scale1); } else { for(m=0;m<3;m++) { mant = get_bits(&s->gb, bits); - s->sb_samples[0][k * 12 + l + m][i] = + s->sb_samples[0][k * 12 + l + m][i] = l1_unscale(bits - 1, mant, scale0); - s->sb_samples[1][k * 12 + l + m][i] = + s->sb_samples[1][k * 12 + l + m][i] = l1_unscale(bits - 1, mant, scale1); } } @@ -1542,7 +1403,7 @@ static int mp_decode_layer2(MPADecodeContext *s) s->sb_samples[1][k * 12 + l + 2][i] = 0; } /* next subband in alloc table */ - j += 1 << bit_alloc_bits; + j += 1 << bit_alloc_bits; } /* fill remaining samples to zero */ for(i=sblimit;igb.buffer + (get_bits_count(&s->gb)>>3)); - - /* copy old data before current one */ - ptr -= backstep; - memcpy(ptr, s->inbuf1[s->inbuf_index ^ 1] + - BACKSTEP_SIZE + s->old_frame_size - backstep, backstep); - /* init get bits again */ - init_get_bits(&s->gb, ptr, (s->frame_size + backstep)*8); - - /* prepare next buffer */ - s->inbuf_index ^= 1; - s->inbuf = &s->inbuf1[s->inbuf_index][BACKSTEP_SIZE]; - s->old_frame_size = s->frame_size; -} - static inline void lsf_sf_expand(int *slen, int sf, int n1, int n2, int n3) { @@ -1600,7 +1438,7 @@ static inline void lsf_sf_expand(int *slen, slen[0] = sf; } -static void exponents_from_scale_factors(MPADecodeContext *s, +static void exponents_from_scale_factors(MPADecodeContext *s, GranuleDef *g, int16_t *exponents) { @@ -1615,7 +1453,7 @@ static void exponents_from_scale_factors(MPADecodeContext *s, bstab = band_size_long[s->sample_rate_index]; pretab = mpa_pretab[g->preflag]; for(i=0;ilong_end;i++) { - v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift); + v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400; len = bstab[i]; for(j=len;j>0;j--) *exp_ptr++ = v0; @@ -1630,7 +1468,7 @@ static void exponents_from_scale_factors(MPADecodeContext *s, for(i=g->short_start;i<13;i++) { len = bstab[i]; for(l=0;l<3;l++) { - v0 = gains[l] - (g->scale_factors[k++] << shift); + v0 = gains[l] - (g->scale_factors[k++] << shift) 
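
/*
 * Illustration only: layer-2 "grouped" quantisation as decoded above.  For
 * the 3-, 5- and 9-step quantisers a single code word packs three
 * consecutive samples in base `steps`; each digit is then rescaled by
 * l2_unscale_group().
 */
static void ungroup3(int v, int steps, int digits[3])
{
    digits[0] = v % steps;  v /= steps;
    digits[1] = v % steps;  v /= steps;
    digits[2] = v;
}
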
+ 400; for(j=len;j>0;j--) *exp_ptr++ = v0; } @@ -1647,18 +1485,32 @@ static inline int get_bitsz(GetBitContext *s, int n) return get_bits(s, n); } + +static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos, int *end_pos2){ + if(s->in_gb.buffer && *pos >= s->gb.size_in_bits){ + s->gb= s->in_gb; + s->in_gb.buffer=NULL; + assert((get_bits_count(&s->gb) & 7) == 0); + skip_bits_long(&s->gb, *pos - *end_pos); + *end_pos2= + *end_pos= *end_pos2 + get_bits_count(&s->gb) - *pos; + *pos= get_bits_count(&s->gb); + } +} + static int huffman_decode(MPADecodeContext *s, GranuleDef *g, - int16_t *exponents, int end_pos) + int16_t *exponents, int end_pos2) { int s_index; - int linbits, code, x, y, l, v, i, j, k, pos; - GetBitContext last_gb; + int i; + int last_pos, bits_left; VLC *vlc; - uint8_t *code_table; + int end_pos= FFMIN(end_pos2, s->gb.size_in_bits); /* low frequencies (called big values) */ s_index = 0; for(i=0;i<3;i++) { + int j, k, l, linbits; j = g->region_size[i]; if (j == 0) continue; @@ -1667,83 +1519,139 @@ static int huffman_decode(MPADecodeContext *s, GranuleDef *g, l = mpa_huff_data[k][0]; linbits = mpa_huff_data[k][1]; vlc = &huff_vlc[l]; - code_table = huff_code_table[l]; + + if(!l){ + memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*2*j); + s_index += 2*j; + continue; + } /* read huffcode and compute each couple */ for(;j>0;j--) { - if (get_bits_count(&s->gb) >= end_pos) - break; - if (code_table) { - code = get_vlc(&s->gb, vlc); - if (code < 0) - return -1; - y = code_table[code]; - x = y >> 4; + int exponent, x, y, v; + int pos= get_bits_count(&s->gb); + + if (pos >= end_pos){ +// av_log(NULL, AV_LOG_ERROR, "pos: %d %d %d %d\n", pos, end_pos, end_pos2, s_index); + switch_buffer(s, &pos, &end_pos, &end_pos2); +// av_log(NULL, AV_LOG_ERROR, "new pos: %d %d\n", pos, end_pos); + if(pos >= end_pos) + break; + } + y = get_vlc2(&s->gb, vlc->table, 7, 3); + + if(!y){ + g->sb_hybrid[s_index ] = + g->sb_hybrid[s_index+1] = 0; + s_index += 2; + continue; + } + + exponent= exponents[s_index]; + + dprintf(s->avctx, "region=%d n=%d x=%d y=%d exp=%d\n", + i, g->region_size[i] - j, x, y, exponent); + if(y&16){ + x = y >> 5; y = y & 0x0f; - } else { - x = 0; - y = 0; - } - dprintf("region=%d n=%d x=%d y=%d exp=%d\n", - i, g->region_size[i] - j, x, y, exponents[s_index]); - if (x) { - if (x == 15) + if (x < 15){ + v = expval_table[ exponent ][ x ]; +// v = expval_table[ (exponent&3) ][ x ] >> FFMIN(0 - (exponent>>2), 31); + }else{ x += get_bitsz(&s->gb, linbits); - v = l3_unscale(x, exponents[s_index]); + v = l3_unscale(x, exponent); + } if (get_bits1(&s->gb)) v = -v; - } else { - v = 0; - } - g->sb_hybrid[s_index++] = v; - if (y) { - if (y == 15) + g->sb_hybrid[s_index] = v; + if (y < 15){ + v = expval_table[ exponent ][ y ]; + }else{ y += get_bitsz(&s->gb, linbits); - v = l3_unscale(y, exponents[s_index]); + v = l3_unscale(y, exponent); + } if (get_bits1(&s->gb)) v = -v; - } else { - v = 0; + g->sb_hybrid[s_index+1] = v; + }else{ + x = y >> 5; + y = y & 0x0f; + x += y; + if (x < 15){ + v = expval_table[ exponent ][ x ]; + }else{ + x += get_bitsz(&s->gb, linbits); + v = l3_unscale(x, exponent); + } + if (get_bits1(&s->gb)) + v = -v; + g->sb_hybrid[s_index+!!y] = v; + g->sb_hybrid[s_index+ !y] = 0; } - g->sb_hybrid[s_index++] = v; + s_index+=2; } } - + /* high frequencies */ vlc = &huff_quad_vlc[g->count1table_select]; - last_gb.buffer = NULL; + last_pos=0; while (s_index <= 572) { + int pos, code; pos = get_bits_count(&s->gb); if (pos >= end_pos) { - if (pos > end_pos && 
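
/*
 * Illustration only: how the packed big-values code used above relates to
 * the (x,y) coefficient pair.  The tables are built in decode_init() with
 * index (x<<5) | y | ((x&&y)<<4), so bit 4 means "both non-zero"; with bit 4
 * clear at most one of the two nibbles is set and x+y is its magnitude,
 * which is why the decoder can store into g->sb_hybrid[s_index + !!y]
 * directly.
 */
static void unpack_xy(int code, int *x, int *y)
{
    if (code & 16) {                       /* both coefficients non-zero */
        *x = code >> 5;
        *y = code & 0x0f;
    } else {                               /* at most one non-zero       */
        int v = (code >> 5) + (code & 0x0f);
        if (code & 0x0f) { *x = 0; *y = v; }
        else             { *x = v; *y = 0; }
    }
}
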
last_gb.buffer != NULL) { + if (pos > end_pos2 && last_pos){ /* some encoders generate an incorrect size for this part. We must go back into the data */ s_index -= 4; - s->gb = last_gb; + skip_bits_long(&s->gb, last_pos - pos); + av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos); + if(s->error_resilience >= FF_ER_COMPLIANT) + s_index=0; + break; } - break; +// av_log(NULL, AV_LOG_ERROR, "pos2: %d %d %d %d\n", pos, end_pos, end_pos2, s_index); + switch_buffer(s, &pos, &end_pos, &end_pos2); +// av_log(NULL, AV_LOG_ERROR, "new pos2: %d %d %d\n", pos, end_pos, s_index); + if(pos >= end_pos) + break; } - last_gb= s->gb; + last_pos= pos; - code = get_vlc(&s->gb, vlc); - dprintf("t=%d code=%d\n", g->count1table_select, code); - if (code < 0) - return -1; - for(i=0;i<4;i++) { - if (code & (8 >> i)) { - /* non zero value. Could use a hand coded function for - 'one' value */ - v = l3_unscale(1, exponents[s_index]); - if(get_bits1(&s->gb)) - v = -v; - } else { - v = 0; - } - g->sb_hybrid[s_index++] = v; + code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1); + dprintf(s->avctx, "t=%d code=%d\n", g->count1table_select, code); + g->sb_hybrid[s_index+0]= + g->sb_hybrid[s_index+1]= + g->sb_hybrid[s_index+2]= + g->sb_hybrid[s_index+3]= 0; + while(code){ + static const int idxtab[16]={3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0}; + int v; + int pos= s_index+idxtab[code]; + code ^= 8>>idxtab[code]; + v = exp_table[ exponents[pos] ]; +// v = exp_table[ (exponents[pos]&3) ] >> FFMIN(0 - (exponents[pos]>>2), 31); + if(get_bits1(&s->gb)) + v = -v; + g->sb_hybrid[pos] = v; } + s_index+=4; } - while (s_index < 576) - g->sb_hybrid[s_index++] = 0; + /* skip extension bits */ + bits_left = end_pos2 - get_bits_count(&s->gb); +//av_log(NULL, AV_LOG_ERROR, "left:%d buf:%p\n", bits_left, s->in_gb.buffer); + if (bits_left < 0/* || bits_left > 500*/) { + av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left); + s_index=0; + }else if(bits_left > 0 && s->error_resilience >= FF_ER_AGGRESSIVE){ + av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left); + s_index=0; + } + memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*(576 - s_index)); + skip_bits_long(&s->gb, bits_left); + + i= get_bits_count(&s->gb); + switch_buffer(s, &i, &end_pos, &end_pos2); + return 0; } @@ -1752,7 +1660,7 @@ static int huffman_decode(MPADecodeContext *s, GranuleDef *g, complicated */ static void reorder_block(MPADecodeContext *s, GranuleDef *g) { - int i, j, k, len; + int i, j, len; int32_t *ptr, *dst, *ptr1; int32_t tmp[576]; @@ -1768,18 +1676,19 @@ static void reorder_block(MPADecodeContext *s, GranuleDef *g) } else { ptr = g->sb_hybrid; } - + for(i=g->short_start;i<13;i++) { len = band_size_short[s->sample_rate_index][i]; ptr1 = ptr; - for(k=0;k<3;k++) { - dst = tmp + k; - for(j=len;j>0;j--) { - *dst = *ptr++; - dst += 3; - } + dst = tmp; + for(j=len;j>0;j--) { + *dst++ = ptr[0*len]; + *dst++ = ptr[1*len]; + *dst++ = ptr[2*len]; + ptr++; } - memcpy(ptr1, tmp, len * 3 * sizeof(int32_t)); + ptr+=2*len; + memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1)); } } @@ -1804,7 +1713,7 @@ static void compute_stereo(MPADecodeContext *s, is_tab = is_table_lsf[g1->scalefac_compress & 1]; sf_max = 16; } - + tab0 = g0->sb_hybrid + 576; tab1 = g1->sb_hybrid + 576; @@ -1855,8 +1764,8 @@ static void compute_stereo(MPADecodeContext *s, } } - non_zero_found = non_zero_found_short[0] | - non_zero_found_short[1] | + non_zero_found = non_zero_found_short[0] | + non_zero_found_short[1] | non_zero_found_short[2]; for(i 
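
/*
 * Illustration only: the straight-line equivalent of the count-1 (quad)
 * decode above, essentially the loop this patch removes.  The VLC yields a
 * 4-bit pattern, one bit per spectral line; each set bit becomes +/-1
 * requantised at that line's exponent.  The new loop walks only the set
 * bits via idxtab[] and reads the value from the precomputed exp_table[].
 */
static void decode_quad_sketch(MPADecodeContext *s, GranuleDef *g,
                               const int16_t *exponents, int code, int s_index)
{
    int i, v;
    for (i = 0; i < 4; i++) {
        if (code & (8 >> i)) {
            v = l3_unscale(1, exponents[s_index + i]);
            if (get_bits1(&s->gb))
                v = -v;
        } else
            v = 0;
        g->sb_hybrid[s_index + i] = v;
    }
}
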
= g1->long_end - 1;i >= 0;i--) { @@ -1915,8 +1824,8 @@ static void compute_stereo(MPADecodeContext *s, static void compute_antialias_integer(MPADecodeContext *s, GranuleDef *g) { - int32_t *ptr, *p0, *p1, *csa; - int n, i, j; + int32_t *ptr, *csa; + int n, i; /* we antialias only "long" bands */ if (g->block_type == 2) { @@ -1927,47 +1836,36 @@ static void compute_antialias_integer(MPADecodeContext *s, } else { n = SBLIMIT - 1; } - + ptr = g->sb_hybrid + 18; for(i = n;i > 0;i--) { - p0 = ptr - 1; - p1 = ptr; - csa = &csa_table[0][0]; - for(j=0;j<4;j++) { - int tmp0 = *p0; - int tmp1 = *p1; -#if 0 - *p0 = FRAC_RND(MUL64(tmp0, csa[0]) - MUL64(tmp1, csa[1])); - *p1 = FRAC_RND(MUL64(tmp0, csa[1]) + MUL64(tmp1, csa[0])); -#else - int64_t tmp2= MUL64(tmp0 + tmp1, csa[0]); - *p0 = FRAC_RND(tmp2 - MUL64(tmp1, csa[2])); - *p1 = FRAC_RND(tmp2 + MUL64(tmp0, csa[3])); -#endif - p0--; p1++; - csa += 4; - tmp0 = *p0; - tmp1 = *p1; -#if 0 - *p0 = FRAC_RND(MUL64(tmp0, csa[0]) - MUL64(tmp1, csa[1])); - *p1 = FRAC_RND(MUL64(tmp0, csa[1]) + MUL64(tmp1, csa[0])); -#else - tmp2= MUL64(tmp0 + tmp1, csa[0]); - *p0 = FRAC_RND(tmp2 - MUL64(tmp1, csa[2])); - *p1 = FRAC_RND(tmp2 + MUL64(tmp0, csa[3])); -#endif - p0--; p1++; - csa += 4; - } - ptr += 18; + int tmp0, tmp1, tmp2; + csa = &csa_table[0][0]; +#define INT_AA(j) \ + tmp0 = ptr[-1-j];\ + tmp1 = ptr[ j];\ + tmp2= MULH(tmp0 + tmp1, csa[0+4*j]);\ + ptr[-1-j] = 4*(tmp2 - MULH(tmp1, csa[2+4*j]));\ + ptr[ j] = 4*(tmp2 + MULH(tmp0, csa[3+4*j])); + + INT_AA(0) + INT_AA(1) + INT_AA(2) + INT_AA(3) + INT_AA(4) + INT_AA(5) + INT_AA(6) + INT_AA(7) + + ptr += 18; } } static void compute_antialias_float(MPADecodeContext *s, GranuleDef *g) { - int32_t *ptr, *p0, *p1; - int n, i, j; + int32_t *ptr; + int n, i; /* we antialias only "long" bands */ if (g->block_type == 2) { @@ -1978,52 +1876,38 @@ static void compute_antialias_float(MPADecodeContext *s, } else { n = SBLIMIT - 1; } - + ptr = g->sb_hybrid + 18; for(i = n;i > 0;i--) { - float *csa = &csa_table_float[0][0]; - p0 = ptr - 1; - p1 = ptr; - for(j=0;j<4;j++) { - float tmp0 = *p0; - float tmp1 = *p1; -#if 1 - *p0 = lrintf(tmp0 * csa[0] - tmp1 * csa[1]); - *p1 = lrintf(tmp0 * csa[1] + tmp1 * csa[0]); -#else - float tmp2= (tmp0 + tmp1) * csa[0]; - *p0 = lrintf(tmp2 - tmp1 * csa[2]); - *p1 = lrintf(tmp2 + tmp0 * csa[3]); -#endif - p0--; p1++; - csa += 4; - tmp0 = *p0; - tmp1 = *p1; -#if 1 - *p0 = lrintf(tmp0 * csa[0] - tmp1 * csa[1]); - *p1 = lrintf(tmp0 * csa[1] + tmp1 * csa[0]); -#else - tmp2= (tmp0 + tmp1) * csa[0]; - *p0 = lrintf(tmp2 - tmp1 * csa[2]); - *p1 = lrintf(tmp2 + tmp0 * csa[3]); -#endif - p0--; p1++; - csa += 4; - } - ptr += 18; + float tmp0, tmp1; + float *csa = &csa_table_float[0][0]; +#define FLOAT_AA(j)\ + tmp0= ptr[-1-j];\ + tmp1= ptr[ j];\ + ptr[-1-j] = lrintf(tmp0 * csa[0+4*j] - tmp1 * csa[1+4*j]);\ + ptr[ j] = lrintf(tmp0 * csa[1+4*j] + tmp1 * csa[0+4*j]); + + FLOAT_AA(0) + FLOAT_AA(1) + FLOAT_AA(2) + FLOAT_AA(3) + FLOAT_AA(4) + FLOAT_AA(5) + FLOAT_AA(6) + FLOAT_AA(7) + + ptr += 18; } } static void compute_imdct(MPADecodeContext *s, - GranuleDef *g, + GranuleDef *g, int32_t *sb_samples, int32_t *mdct_buf) { - int32_t *ptr, *win, *win1, *buf, *buf2, *out_ptr, *ptr1; - int32_t in[6]; - int32_t out[36]; + int32_t *ptr, *win, *win1, *buf, *out_ptr, *ptr1; int32_t out2[12]; - int i, j, k, mdct_long_end, v, sblimit; + int i, j, mdct_long_end, v, sblimit; /* find last non zero block */ ptr = g->sb_hybrid + 576; @@ -2049,7 +1933,6 @@ static void compute_imdct(MPADecodeContext *s, buf = mdct_buf; ptr = 
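
/*
 * Illustration only: the 3-multiply butterfly used by INT_AA() above.
 * With cs = 1/sqrt(1 + ci*ci) and ca = cs*ci (see the csa_table setup),
 *   t  = (a + b)*cs
 *   a' = t - b*(ca + cs) == a*cs - b*ca
 *   b' = t + a*(ca - cs) == a*ca + b*cs
 * which is why csa_table[i][2] and csa_table[i][3] hold ca+cs and ca-cs.
 */
static void aa_butterfly_sketch(float *a, float *b, float cs, float ca)
{
    float t  = (*a + *b) * cs;
    float a1 = t - *b * (ca + cs);
    float b1 = t + *a * (ca - cs);
    *a = a1;
    *b = b1;
}
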
g->sb_hybrid; for(j=0;jblock_type]; /* select frequency inversion */ win = win1 + ((4 * 36) & -(j & 1)); - for(i=0;i<18;i++) { - *out_ptr = MULL(out[i], win[i]) + buf[i]; - buf[i] = MULL(out[i + 18], win[i + 18]); - out_ptr += SBLIMIT; - } + imdct36(out_ptr, buf, ptr, win); + out_ptr += 18*SBLIMIT; ptr += 18; buf += 18; } for(j=mdct_long_end;jlsf) { main_data_begin = get_bits(&s->gb, 8); - if (s->nb_channels == 2) - private_bits = get_bits(&s->gb, 2); - else - private_bits = get_bits(&s->gb, 1); + private_bits = get_bits(&s->gb, s->nb_channels); nb_granules = 1; } else { main_data_begin = get_bits(&s->gb, 9); @@ -2185,47 +2058,43 @@ static int mp_decode_layer3(MPADecodeContext *s) granules[ch][1].scfsi = get_bits(&s->gb, 4); } } - + for(gr=0;grnb_channels;ch++) { - dprintf("gr=%d ch=%d: side_info\n", gr, ch); + dprintf(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch); g = &granules[ch][gr]; g->part2_3_length = get_bits(&s->gb, 12); g->big_values = get_bits(&s->gb, 9); + if(g->big_values > 288){ + av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n"); + return -1; + } + g->global_gain = get_bits(&s->gb, 8); /* if MS stereo only is selected, we precompute the 1/sqrt(2) renormalization factor */ - if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) == + if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) == MODE_EXT_MS_STEREO) g->global_gain -= 2; if (s->lsf) g->scalefac_compress = get_bits(&s->gb, 9); else g->scalefac_compress = get_bits(&s->gb, 4); - blocksplit_flag = get_bits(&s->gb, 1); + blocksplit_flag = get_bits1(&s->gb); if (blocksplit_flag) { g->block_type = get_bits(&s->gb, 2); - if (g->block_type == 0) + if (g->block_type == 0){ + av_log(s->avctx, AV_LOG_ERROR, "invalid block type\n"); return -1; - g->switch_point = get_bits(&s->gb, 1); + } + g->switch_point = get_bits1(&s->gb); for(i=0;i<2;i++) g->table_select[i] = get_bits(&s->gb, 5); - for(i=0;i<3;i++) + for(i=0;i<3;i++) g->subblock_gain[i] = get_bits(&s->gb, 3); - /* compute huffman coded region sizes */ - if (g->block_type == 2) - g->region_size[0] = (36 / 2); - else { - if (s->sample_rate_index <= 2) - g->region_size[0] = (36 / 2); - else if (s->sample_rate_index != 8) - g->region_size[0] = (54 / 2); - else - g->region_size[0] = (108 / 2); - } - g->region_size[1] = (576 / 2); + ff_init_short_region(s, g); } else { - int region_address1, region_address2, l; + int region_address1, region_address2; g->block_type = 0; g->switch_point = 0; for(i=0;i<3;i++) @@ -2233,75 +2102,54 @@ static int mp_decode_layer3(MPADecodeContext *s) /* compute huffman coded region sizes */ region_address1 = get_bits(&s->gb, 4); region_address2 = get_bits(&s->gb, 3); - dprintf("region1=%d region2=%d\n", + dprintf(s->avctx, "region1=%d region2=%d\n", region_address1, region_address2); - g->region_size[0] = - band_index_long[s->sample_rate_index][region_address1 + 1] >> 1; - l = region_address1 + region_address2 + 2; - /* should not overflow */ - if (l > 22) - l = 22; - g->region_size[1] = - band_index_long[s->sample_rate_index][l] >> 1; - } - /* convert region offsets to region sizes and truncate - size to big_values */ - g->region_size[2] = (576 / 2); - j = 0; - for(i=0;i<3;i++) { - k = g->region_size[i]; - if (k > g->big_values) - k = g->big_values; - g->region_size[i] = k - j; - j = k; + ff_init_long_region(s, g, region_address1, region_address2); } + ff_region_offset2size(g); + ff_compute_band_indexes(s, g); - /* compute band indexes */ - if (g->block_type == 2) { - if (g->switch_point) { - /* if switched mode, we handle the 36 first 
samples as - long blocks. For 8000Hz, we handle the 48 first - exponents as long blocks (XXX: check this!) */ - if (s->sample_rate_index <= 2) - g->long_end = 8; - else if (s->sample_rate_index != 8) - g->long_end = 6; - else - g->long_end = 4; /* 8000 Hz */ - - if (s->sample_rate_index != 8) - g->short_start = 3; - else - g->short_start = 2; - } else { - g->long_end = 0; - g->short_start = 0; - } - } else { - g->short_start = 13; - g->long_end = 22; - } - g->preflag = 0; if (!s->lsf) - g->preflag = get_bits(&s->gb, 1); - g->scalefac_scale = get_bits(&s->gb, 1); - g->count1table_select = get_bits(&s->gb, 1); - dprintf("block_type=%d switch_point=%d\n", + g->preflag = get_bits1(&s->gb); + g->scalefac_scale = get_bits1(&s->gb); + g->count1table_select = get_bits1(&s->gb); + dprintf(s->avctx, "block_type=%d switch_point=%d\n", g->block_type, g->switch_point); } } + if (!s->adu_mode) { + const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3); + assert((get_bits_count(&s->gb) & 7) == 0); /* now we get bits from the main_data_begin offset */ - dprintf("seekback: %d\n", main_data_begin); - seek_to_maindata(s, main_data_begin); + dprintf(s->avctx, "seekback: %d\n", main_data_begin); +//av_log(NULL, AV_LOG_ERROR, "backstep:%d, lastbuf:%d\n", main_data_begin, s->last_buf_size); + + memcpy(s->last_buf + s->last_buf_size, ptr, EXTRABYTES); + s->in_gb= s->gb; + init_get_bits(&s->gb, s->last_buf, s->last_buf_size*8); + skip_bits_long(&s->gb, 8*(s->last_buf_size - main_data_begin)); + } for(gr=0;grnb_channels;ch++) { g = &granules[ch][gr]; - + if(get_bits_count(&s->gb)<0){ + av_log(s->avctx, AV_LOG_ERROR, "mdb:%d, lastbuf:%d skipping granule %d\n", + main_data_begin, s->last_buf_size, gr); + skip_bits_long(&s->gb, g->part2_3_length); + memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid)); + if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->in_gb.buffer){ + skip_bits_long(&s->in_gb, get_bits_count(&s->gb) - s->gb.size_in_bits); + s->gb= s->in_gb; + s->in_gb.buffer=NULL; + } + continue; + } + bits_pos = get_bits_count(&s->gb); - + if (!s->lsf) { uint8_t *sc; int slen, slen1, slen2; @@ -2309,16 +2157,26 @@ static int mp_decode_layer3(MPADecodeContext *s) /* MPEG1 scale factors */ slen1 = slen_table[0][g->scalefac_compress]; slen2 = slen_table[1][g->scalefac_compress]; - dprintf("slen1=%d slen2=%d\n", slen1, slen2); + dprintf(s->avctx, "slen1=%d slen2=%d\n", slen1, slen2); if (g->block_type == 2) { n = g->switch_point ? 17 : 18; j = 0; - for(i=0;iscale_factors[j++] = get_bitsz(&s->gb, slen1); - for(i=0;i<18;i++) - g->scale_factors[j++] = get_bitsz(&s->gb, slen2); - for(i=0;i<3;i++) - g->scale_factors[j++] = 0; + if(slen1){ + for(i=0;iscale_factors[j++] = get_bits(&s->gb, slen1); + }else{ + for(i=0;iscale_factors[j++] = 0; + } + if(slen2){ + for(i=0;i<18;i++) + g->scale_factors[j++] = get_bits(&s->gb, slen2); + for(i=0;i<3;i++) + g->scale_factors[j++] = 0; + }else{ + for(i=0;i<21;i++) + g->scale_factors[j++] = 0; + } } else { sc = granules[ch][0].scale_factors; j = 0; @@ -2326,8 +2184,13 @@ static int mp_decode_layer3(MPADecodeContext *s) n = (k == 0 ? 6 : 5); if ((g->scfsi & (0x8 >> k)) == 0) { slen = (k < 2) ? 
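
/*
 * Illustration only: the bit-reservoir switch performed above in place of
 * the removed seek_to_maindata().  main_data for this frame starts
 * main_data_begin bytes before the end of the bytes saved from previous
 * frames in s->last_buf, so reading starts there and switch_buffer() later
 * falls back to the current frame's bitstream parked in s->in_gb.
 */
static void start_main_data_sketch(GetBitContext *gb, GetBitContext *in_gb,
                                   uint8_t *last_buf, int last_buf_size,
                                   int main_data_begin)
{
    *in_gb = *gb;                                    /* park current frame     */
    init_get_bits(gb, last_buf, last_buf_size * 8);  /* read from the reservoir */
    skip_bits_long(gb, 8 * (last_buf_size - main_data_begin));
}
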
slen1 : slen2; - for(i=0;iscale_factors[j++] = get_bitsz(&s->gb, slen); + if(slen){ + for(i=0;iscale_factors[j++] = get_bits(&s->gb, slen); + }else{ + for(i=0;iscale_factors[j++] = 0; + } } else { /* simply copy from last granule */ for(i=0;iavctx, "scfsi=%x gr=%d ch=%d scale_factors:\n", g->scfsi, gr, ch); for(i=0;iscale_factors[i]); - printf("\n"); + dprintf(s->avctx, " %d", g->scale_factors[i]); + dprintf(s->avctx, "\n"); } #endif } else { @@ -2389,19 +2252,24 @@ static int mp_decode_layer3(MPADecodeContext *s) for(k=0;k<4;k++) { n = lsf_nsf_table[tindex2][tindex][k]; sl = slen[k]; - for(i=0;iscale_factors[j++] = get_bitsz(&s->gb, sl); + if(sl){ + for(i=0;iscale_factors[j++] = get_bits(&s->gb, sl); + }else{ + for(i=0;iscale_factors[j++] = 0; + } } /* XXX: should compute exact size */ for(;j<40;j++) g->scale_factors[j] = 0; #if defined(DEBUG) { - printf("gr=%d ch=%d scale_factors:\n", + dprintf(s->avctx, "gr=%d ch=%d scale_factors:\n", gr, ch); for(i=0;i<40;i++) - printf(" %d", g->scale_factors[i]); - printf("\n"); + dprintf(s->avctx, " %d", g->scale_factors[i]); + dprintf(s->avctx, "\n"); } #endif } @@ -2409,25 +2277,10 @@ static int mp_decode_layer3(MPADecodeContext *s) exponents_from_scale_factors(s, g, exponents); /* read Huffman coded residue */ - if (huffman_decode(s, g, exponents, - bits_pos + g->part2_3_length) < 0) - return -1; + huffman_decode(s, g, exponents, bits_pos + g->part2_3_length); #if defined(DEBUG) sample_dump(0, g->sb_hybrid, 576); #endif - - /* skip extension bits */ - bits_left = g->part2_3_length - (get_bits_count(&s->gb) - bits_pos); - if (bits_left < 0) { - dprintf("bits_left=%d\n", bits_left); - return -1; - } - while (bits_left >= 16) { - skip_bits(&s->gb, 16); - bits_left -= 16; - } - if (bits_left > 0) - skip_bits(&s->gb, bits_left); } /* ch */ if (s->nb_channels == 2) @@ -2444,49 +2297,79 @@ static int mp_decode_layer3(MPADecodeContext *s) #if defined(DEBUG) sample_dump(1, g->sb_hybrid, 576); #endif - compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]); + compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]); #if defined(DEBUG) sample_dump(2, &s->sb_samples[ch][18 * gr][0], 576); #endif } } /* gr */ + if(get_bits_count(&s->gb)<0) + skip_bits_long(&s->gb, -get_bits_count(&s->gb)); return nb_granules * 18; } -static int mp_decode_frame(MPADecodeContext *s, - short *samples) +static int mp_decode_frame(MPADecodeContext *s, + OUT_INT *samples, const uint8_t *buf, int buf_size) { int i, nb_frames, ch; - short *samples_ptr; + OUT_INT *samples_ptr; + + init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE)*8); - init_get_bits(&s->gb, s->inbuf + HEADER_SIZE, - (s->inbuf_ptr - s->inbuf - HEADER_SIZE)*8); - /* skip error protection field */ if (s->error_protection) - get_bits(&s->gb, 16); + skip_bits(&s->gb, 16); - dprintf("frame %d:\n", s->frame_count); + dprintf(s->avctx, "frame %d:\n", s->frame_count); switch(s->layer) { case 1: + s->avctx->frame_size = 384; nb_frames = mp_decode_layer1(s); break; case 2: + s->avctx->frame_size = 1152; nb_frames = mp_decode_layer2(s); break; case 3: + s->avctx->frame_size = s->lsf ? 
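
/*
 * Illustration only: the calling convention of the reentrant
 * ff_mpa_synth_filter() introduced by this patch (used in the output loop
 * further down).  The caller owns a per-channel synthesis ring buffer and
 * its offset, plus a dither-state word; `incr` is the interleave stride
 * between output samples of one channel.
 */
static void synth_one_block_sketch(MPADecodeContext *s, int ch,
                                   OUT_INT *samples, int32_t *sb_samples)
{
    ff_mpa_synth_filter(s->synth_buf[ch], &s->synth_buf_offset[ch],
                        window, &s->dither_state,
                        samples + ch,       /* interleaved output pointer    */
                        s->nb_channels,     /* stride between output samples */
                        sb_samples);        /* 32 input subband samples      */
}
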
576 : 1152; default: nb_frames = mp_decode_layer3(s); + + s->last_buf_size=0; + if(s->in_gb.buffer){ + align_get_bits(&s->gb); + i= (s->gb.size_in_bits - get_bits_count(&s->gb))>>3; + if(i >= 0 && i <= BACKSTEP_SIZE){ + memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb)>>3), i); + s->last_buf_size=i; + }else + av_log(s->avctx, AV_LOG_ERROR, "invalid old backstep %d\n", i); + s->gb= s->in_gb; + s->in_gb.buffer= NULL; + } + + align_get_bits(&s->gb); + assert((get_bits_count(&s->gb) & 7) == 0); + i= (s->gb.size_in_bits - get_bits_count(&s->gb))>>3; + + if(i<0 || i > BACKSTEP_SIZE || nb_frames<0){ + av_log(s->avctx, AV_LOG_WARNING, "invalid new backstep %d\n", i); + i= FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE); + } + assert(i <= buf_size - HEADER_SIZE && i>= 0); + memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i); + s->last_buf_size += i; + break; } #if defined(DEBUG) for(i=0;inb_channels;ch++) { int j; - printf("%d-%d:", i, ch); + dprintf(s->avctx, "%d-%d:", i, ch); for(j=0;jsb_samples[ch][i][j] / FRAC_ONE); - printf("\n"); + dprintf(s->avctx, " %0.6f", (double)s->sb_samples[ch][i][j] / FRAC_ONE); + dprintf(s->avctx, "\n"); } } #endif @@ -2494,176 +2377,288 @@ static int mp_decode_frame(MPADecodeContext *s, for(ch=0;chnb_channels;ch++) { samples_ptr = samples + ch; for(i=0;inb_channels, + ff_mpa_synth_filter(s->synth_buf[ch], &(s->synth_buf_offset[ch]), + window, &s->dither_state, + samples_ptr, s->nb_channels, s->sb_samples[ch][i]); samples_ptr += 32 * s->nb_channels; } } #ifdef DEBUG - s->frame_count++; + s->frame_count++; #endif - return nb_frames * 32 * sizeof(short) * s->nb_channels; + return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels; } static int decode_frame(AVCodecContext * avctx, - void *data, int *data_size, - uint8_t * buf, int buf_size) + void *data, int *data_size, + const uint8_t * buf, int buf_size) { MPADecodeContext *s = avctx->priv_data; uint32_t header; - uint8_t *buf_ptr; - int len, out_size; - short *out_samples = data; + int out_size; + OUT_INT *out_samples = data; - *data_size = 0; - buf_ptr = buf; - while (buf_size > 0) { - len = s->inbuf_ptr - s->inbuf; - if (s->frame_size == 0) { - /* special case for next header for first frame in free - format case (XXX: find a simpler method) */ - if (s->free_format_next_header != 0) { - s->inbuf[0] = s->free_format_next_header >> 24; - s->inbuf[1] = s->free_format_next_header >> 16; - s->inbuf[2] = s->free_format_next_header >> 8; - s->inbuf[3] = s->free_format_next_header; - s->inbuf_ptr = s->inbuf + 4; - s->free_format_next_header = 0; - goto got_header; - } - /* no header seen : find one. We need at least HEADER_SIZE - bytes to parse it */ - len = HEADER_SIZE - len; - if (len > buf_size) - len = buf_size; - if (len > 0) { - memcpy(s->inbuf_ptr, buf_ptr, len); - buf_ptr += len; - buf_size -= len; - s->inbuf_ptr += len; - } - if ((s->inbuf_ptr - s->inbuf) >= HEADER_SIZE) { - got_header: - header = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) | - (s->inbuf[2] << 8) | s->inbuf[3]; +retry: + if(buf_size < HEADER_SIZE) + return -1; - if (check_header(header) < 0) { - /* no sync found : move by one byte (inefficient, but simple!) 
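/*
 * Minimal standalone sketch of the Layer III bit-reservoir handling used in
 * the hunks above: the decoder keeps the undecoded tail of previous frames in
 * a backstep buffer and starts each frame's granules main_data_begin bytes
 * before the newly appended main data.  This is an illustration only;
 * RESERVOIR_BYTES, ReservoirSketch and reservoir_add_frame are assumed names,
 * not FFmpeg API.
 */
#include <string.h>

#define RESERVOIR_BYTES 512            /* assumed bound, in the spirit of BACKSTEP_SIZE */

typedef struct {
    unsigned char buf[2 * RESERVOIR_BYTES];
    int size;                          /* bytes of history currently buffered */
} ReservoirSketch;

/* Append one frame's main data and return where granule decoding starts,
 * i.e. main_data_begin bytes before the appended data.  Returns NULL when
 * the stream asks for more history than is buffered (the decoder above
 * skips the granule in that case). */
static const unsigned char *
reservoir_add_frame(ReservoirSketch *r, const unsigned char *main_data,
                    int len, int main_data_begin)
{
    if (main_data_begin > r->size || len > (int)sizeof(r->buf) - r->size)
        return NULL;
    memcpy(r->buf + r->size, main_data, len);
    r->size += len;
    return r->buf + r->size - len - main_data_begin;
}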
*/ - memmove(s->inbuf, s->inbuf + 1, s->inbuf_ptr - s->inbuf - 1); - s->inbuf_ptr--; - dprintf("skip %x\n", header); - /* reset free format frame size to give a chance - to get a new bitrate */ - s->free_format_frame_size = 0; - } else { - if (decode_header(s, header) == 1) { - /* free format: prepare to compute frame size */ - s->frame_size = -1; - } - /* update codec info */ - avctx->sample_rate = s->sample_rate; - avctx->channels = s->nb_channels; - avctx->bit_rate = s->bit_rate; - avctx->sub_id = s->layer; - switch(s->layer) { - case 1: - avctx->frame_size = 384; - break; - case 2: - avctx->frame_size = 1152; - break; - case 3: - if (s->lsf) - avctx->frame_size = 576; - else - avctx->frame_size = 1152; - break; - } - } - } - } else if (s->frame_size == -1) { - /* free format : find next sync to compute frame size */ - len = MPA_MAX_CODED_FRAME_SIZE - len; - if (len > buf_size) - len = buf_size; - if (len == 0) { - /* frame too long: resync */ - s->frame_size = 0; - memmove(s->inbuf, s->inbuf + 1, s->inbuf_ptr - s->inbuf - 1); - s->inbuf_ptr--; - } else { - uint8_t *p, *pend; - uint32_t header1; - int padding; - - memcpy(s->inbuf_ptr, buf_ptr, len); - /* check for header */ - p = s->inbuf_ptr - 3; - pend = s->inbuf_ptr + len - 4; - while (p <= pend) { - header = (p[0] << 24) | (p[1] << 16) | - (p[2] << 8) | p[3]; - header1 = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) | - (s->inbuf[2] << 8) | s->inbuf[3]; - /* check with high probability that we have a - valid header */ - if ((header & SAME_HEADER_MASK) == - (header1 & SAME_HEADER_MASK)) { - /* header found: update pointers */ - len = (p + 4) - s->inbuf_ptr; - buf_ptr += len; - buf_size -= len; - s->inbuf_ptr = p; - /* compute frame size */ - s->free_format_next_header = header; - s->free_format_frame_size = s->inbuf_ptr - s->inbuf; - padding = (header1 >> 9) & 1; - if (s->layer == 1) - s->free_format_frame_size -= padding * 4; - else - s->free_format_frame_size -= padding; - dprintf("free frame size=%d padding=%d\n", - s->free_format_frame_size, padding); - decode_header(s, header1); - goto next_data; - } - p++; - } - /* not found: simply increase pointers */ - buf_ptr += len; - s->inbuf_ptr += len; - buf_size -= len; - } - } else if (len < s->frame_size) { - if (s->frame_size > MPA_MAX_CODED_FRAME_SIZE) - s->frame_size = MPA_MAX_CODED_FRAME_SIZE; - len = s->frame_size - len; - if (len > buf_size) - len = buf_size; - memcpy(s->inbuf_ptr, buf_ptr, len); - buf_ptr += len; - s->inbuf_ptr += len; - buf_size -= len; - } - next_data: - if (s->frame_size > 0 && - (s->inbuf_ptr - s->inbuf) >= s->frame_size) { - if (avctx->parse_only) { - /* simply return the frame data */ - *(uint8_t **)data = s->inbuf; - out_size = s->inbuf_ptr - s->inbuf; - } else { - out_size = mp_decode_frame(s, out_samples); - } - s->inbuf_ptr = s->inbuf; - s->frame_size = 0; - *data_size = out_size; - break; - } + header = AV_RB32(buf); + if(ff_mpa_check_header(header) < 0){ + buf++; +// buf_size--; + av_log(avctx, AV_LOG_ERROR, "Header missing skipping one byte.\n"); + goto retry; } - return buf_ptr - buf; + + if (ff_mpegaudio_decode_header(s, header) == 1) { + /* free format: prepare to compute frame size */ + s->frame_size = -1; + return -1; + } + /* update codec info */ + avctx->channels = s->nb_channels; + avctx->bit_rate = s->bit_rate; + avctx->sub_id = s->layer; + + if(s->frame_size<=0 || s->frame_size > buf_size){ + av_log(avctx, AV_LOG_ERROR, "incomplete frame\n"); + return -1; + }else if(s->frame_size < buf_size){ + av_log(avctx, AV_LOG_ERROR, "incorrect frame 
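/*
 * Minimal sketch of the resynchronization pattern used by decode_frame()
 * above: read a big-endian 32-bit word, validate it, and advance one byte
 * until a plausible header is found.  mpa_header_plausible() mirrors the
 * usual sanity checks (11-bit sync word, valid layer, bitrate and sample
 * rate indexes) using the bit positions that the header decoder below reads;
 * it is an illustration, not the actual ff_mpa_check_header().
 */
#include <stdint.h>
#include <stddef.h>

static int mpa_header_plausible(uint32_t h)
{
    if ((h & 0xffe00000u) != 0xffe00000u) return 0;   /* 11-bit sync word   */
    if (((h >> 17) & 3) == 0)             return 0;   /* reserved layer     */
    if (((h >> 12) & 0xf) == 0xf)         return 0;   /* bad bitrate index  */
    if (((h >> 10) & 3) == 3)             return 0;   /* bad sample rate    */
    return 1;
}

/* Scan forward for the first plausible frame header; returns the byte
 * offset, or -1 if none is found in the buffer. */
static long find_frame_sync(const uint8_t *buf, size_t size)
{
    size_t i;
    for (i = 0; i + 4 <= size; i++) {
        uint32_t h = (uint32_t)buf[i] << 24 | buf[i + 1] << 16 |
                     buf[i + 2] << 8  | buf[i + 3];
        if (mpa_header_plausible(h))
            return (long)i;
    }
    return -1;
}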
size\n"); + buf_size= s->frame_size; + } + + out_size = mp_decode_frame(s, out_samples, buf, buf_size); + if(out_size>=0){ + *data_size = out_size; + avctx->sample_rate = s->sample_rate; + //FIXME maybe move the other codec info stuff from above here too + }else + av_log(avctx, AV_LOG_DEBUG, "Error while decoding MPEG audio frame.\n"); //FIXME return -1 / but also return the number of bytes consumed + s->frame_size = 0; + return buf_size; } +static void flush(AVCodecContext *avctx){ + MPADecodeContext *s = avctx->priv_data; + memset(s->synth_buf, 0, sizeof(s->synth_buf)); + s->last_buf_size= 0; +} + +#ifdef CONFIG_MP3ADU_DECODER +static int decode_frame_adu(AVCodecContext * avctx, + void *data, int *data_size, + const uint8_t * buf, int buf_size) +{ + MPADecodeContext *s = avctx->priv_data; + uint32_t header; + int len, out_size; + OUT_INT *out_samples = data; + + len = buf_size; + + // Discard too short frames + if (buf_size < HEADER_SIZE) { + *data_size = 0; + return buf_size; + } + + + if (len > MPA_MAX_CODED_FRAME_SIZE) + len = MPA_MAX_CODED_FRAME_SIZE; + + // Get header and restore sync word + header = AV_RB32(buf) | 0xffe00000; + + if (ff_mpa_check_header(header) < 0) { // Bad header, discard frame + *data_size = 0; + return buf_size; + } + + ff_mpegaudio_decode_header(s, header); + /* update codec info */ + avctx->sample_rate = s->sample_rate; + avctx->channels = s->nb_channels; + avctx->bit_rate = s->bit_rate; + avctx->sub_id = s->layer; + + s->frame_size = len; + + if (avctx->parse_only) { + out_size = buf_size; + } else { + out_size = mp_decode_frame(s, out_samples, buf, buf_size); + } + + *data_size = out_size; + return buf_size; +} +#endif /* CONFIG_MP3ADU_DECODER */ + +#ifdef CONFIG_MP3ON4_DECODER + +/** + * Context for MP3On4 decoder + */ +typedef struct MP3On4DecodeContext { + int frames; ///< number of mp3 frames per block (number of mp3 decoder instances) + int syncword; ///< syncword patch + const uint8_t *coff; ///< channels offsets in output buffer + MPADecodeContext *mp3decctx[5]; ///< MPADecodeContext for every decoder instance +} MP3On4DecodeContext; + +#include "mpeg4audio.h" + +/* Next 3 arrays are indexed by channel config number (passed via codecdata) */ +static const uint8_t mp3Frames[8] = {0,1,1,2,3,3,4,5}; /* number of mp3 decoder instances */ +/* offsets into output buffer, assume output order is FL FR BL BR C LFE */ +static const uint8_t chan_offset[8][5] = { + {0}, + {0}, // C + {0}, // FLR + {2,0}, // C FLR + {2,0,3}, // C FLR BS + {4,0,2}, // C FLR BLRS + {4,0,2,5}, // C FLR BLRS LFE + {4,0,2,6,5}, // C FLR BLRS BLR LFE +}; + + +static int decode_init_mp3on4(AVCodecContext * avctx) +{ + MP3On4DecodeContext *s = avctx->priv_data; + MPEG4AudioConfig cfg; + int i; + + if ((avctx->extradata_size < 2) || (avctx->extradata == NULL)) { + av_log(avctx, AV_LOG_ERROR, "Codec extradata missing or too short.\n"); + return -1; + } + + ff_mpeg4audio_get_config(&cfg, avctx->extradata, avctx->extradata_size); + if (!cfg.chan_config || cfg.chan_config > 7) { + av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n"); + return -1; + } + s->frames = mp3Frames[cfg.chan_config]; + s->coff = chan_offset[cfg.chan_config]; + avctx->channels = ff_mpeg4audio_channels[cfg.chan_config]; + + if (cfg.sample_rate < 16000) + s->syncword = 0xffe00000; + else + s->syncword = 0xfff00000; + + /* Init the first mp3 decoder in standard way, so that all tables get builded + * We replace avctx->priv_data with the context of the first decoder so that + * decode_init() does not have to be 
changed. + * Other decoders will be initialized here copying data from the first context + */ + // Allocate zeroed memory for the first decoder context + s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext)); + // Put decoder context in place to make init_decode() happy + avctx->priv_data = s->mp3decctx[0]; + decode_init(avctx); + // Restore mp3on4 context pointer + avctx->priv_data = s; + s->mp3decctx[0]->adu_mode = 1; // Set adu mode + + /* Create a separate codec/context for each frame (first is already ok). + * Each frame is 1 or 2 channels - up to 5 frames allowed + */ + for (i = 1; i < s->frames; i++) { + s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext)); + s->mp3decctx[i]->compute_antialias = s->mp3decctx[0]->compute_antialias; + s->mp3decctx[i]->adu_mode = 1; + s->mp3decctx[i]->avctx = avctx; + } + + return 0; +} + + +static int decode_close_mp3on4(AVCodecContext * avctx) +{ + MP3On4DecodeContext *s = avctx->priv_data; + int i; + + for (i = 0; i < s->frames; i++) + if (s->mp3decctx[i]) + av_free(s->mp3decctx[i]); + + return 0; +} + + +static int decode_frame_mp3on4(AVCodecContext * avctx, + void *data, int *data_size, + const uint8_t * buf, int buf_size) +{ + MP3On4DecodeContext *s = avctx->priv_data; + MPADecodeContext *m; + int fsize, len = buf_size, out_size = 0; + uint32_t header; + OUT_INT *out_samples = data; + OUT_INT decoded_buf[MPA_FRAME_SIZE * MPA_MAX_CHANNELS]; + OUT_INT *outptr, *bp; + int fr, j, n; + + *data_size = 0; + // Discard too short frames + if (buf_size < HEADER_SIZE) + return -1; + + // If only one decoder interleave is not needed + outptr = s->frames == 1 ? out_samples : decoded_buf; + + avctx->bit_rate = 0; + + for (fr = 0; fr < s->frames; fr++) { + fsize = AV_RB16(buf) >> 4; + fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE); + m = s->mp3decctx[fr]; + assert (m != NULL); + + header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header + + if (ff_mpa_check_header(header) < 0) // Bad header, discard block + break; + + ff_mpegaudio_decode_header(m, header); + out_size += mp_decode_frame(m, outptr, buf, fsize); + buf += fsize; + len -= fsize; + + if(s->frames > 1) { + n = m->avctx->frame_size*m->nb_channels; + /* interleave output data */ + bp = out_samples + s->coff[fr]; + if(m->nb_channels == 1) { + for(j = 0; j < n; j++) { + *bp = decoded_buf[j]; + bp += avctx->channels; + } + } else { + for(j = 0; j < n; j++) { + bp[0] = decoded_buf[j++]; + bp[1] = decoded_buf[j]; + bp += avctx->channels; + } + } + } + avctx->bit_rate += m->bit_rate; + } + + /* update codec info */ + avctx->sample_rate = s->mp3decctx[0]->sample_rate; + + *data_size = out_size; + return buf_size; +} +#endif /* CONFIG_MP3ON4_DECODER */ + +#ifdef CONFIG_MP2_DECODER AVCodec mp2_decoder = { "mp2", @@ -2675,8 +2670,11 @@ AVCodec mp2_decoder = NULL, decode_frame, CODEC_CAP_PARSE_ONLY, + .flush= flush, + .long_name= NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"), }; - +#endif +#ifdef CONFIG_MP3_DECODER AVCodec mp3_decoder = { "mp3", @@ -2688,4 +2686,38 @@ AVCodec mp3_decoder = NULL, decode_frame, CODEC_CAP_PARSE_ONLY, + .flush= flush, + .long_name= NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"), }; +#endif +#ifdef CONFIG_MP3ADU_DECODER +AVCodec mp3adu_decoder = +{ + "mp3adu", + CODEC_TYPE_AUDIO, + CODEC_ID_MP3ADU, + sizeof(MPADecodeContext), + decode_init, + NULL, + NULL, + decode_frame_adu, + CODEC_CAP_PARSE_ONLY, + .flush= flush, + .long_name= NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"), +}; +#endif +#ifdef CONFIG_MP3ON4_DECODER +AVCodec 
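/*
 * Minimal sketch of the channel interleaving done by decode_frame_mp3on4()
 * below: each sub-decoder produces 1 or 2 channels which are copied into the
 * final interleaved buffer at a per-stream channel offset (s->coff[fr]).
 * The helper shows that stride-copy pattern on plain int16_t samples; the
 * names are illustrative, not part of the patch.
 */
#include <stdint.h>

/* Copy nb_samples frames of a 1- or 2-channel sub-stream into an interleaved
 * output with total_channels channels, starting at channel_offset. */
static void interleave_substream(int16_t *out, int total_channels,
                                 int channel_offset,
                                 const int16_t *in, int sub_channels,
                                 int nb_samples)
{
    int i, c;
    for (i = 0; i < nb_samples; i++)
        for (c = 0; c < sub_channels; c++)
            out[i * total_channels + channel_offset + c] =
                in[i * sub_channels + c];
}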
mp3on4_decoder = +{ + "mp3on4", + CODEC_TYPE_AUDIO, + CODEC_ID_MP3ON4, + sizeof(MP3On4DecodeContext), + decode_init_mp3on4, + NULL, + decode_close_mp3on4, + decode_frame_mp3on4, + .flush= flush, + .long_name= NULL_IF_CONFIG_SMALL("MP3onMP4"), +}; +#endif diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodecheader.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodecheader.c new file mode 100644 index 0000000000..efea499619 --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodecheader.c @@ -0,0 +1,109 @@ +/* + * MPEG Audio header decoder + * Copyright (c) 2001, 2002 Fabrice Bellard. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file mpegaudiodecheader.c + * MPEG Audio header decoder. + */ + +//#define DEBUG +#include "avcodec.h" +#include "mpegaudio.h" +#include "mpegaudiodata.h" + + +int ff_mpegaudio_decode_header(MPADecodeContext *s, uint32_t header) +{ + int sample_rate, frame_size, mpeg25, padding; + int sample_rate_index, bitrate_index; + if (header & (1<<20)) { + s->lsf = (header & (1<<19)) ? 
0 : 1; + mpeg25 = 0; + } else { + s->lsf = 1; + mpeg25 = 1; + } + + s->layer = 4 - ((header >> 17) & 3); + /* extract frequency */ + sample_rate_index = (header >> 10) & 3; + sample_rate = ff_mpa_freq_tab[sample_rate_index] >> (s->lsf + mpeg25); + sample_rate_index += 3 * (s->lsf + mpeg25); + s->sample_rate_index = sample_rate_index; + s->error_protection = ((header >> 16) & 1) ^ 1; + s->sample_rate = sample_rate; + + bitrate_index = (header >> 12) & 0xf; + padding = (header >> 9) & 1; + //extension = (header >> 8) & 1; + s->mode = (header >> 6) & 3; + s->mode_ext = (header >> 4) & 3; + //copyright = (header >> 3) & 1; + //original = (header >> 2) & 1; + //emphasis = header & 3; + + if (s->mode == MPA_MONO) + s->nb_channels = 1; + else + s->nb_channels = 2; + + if (bitrate_index != 0) { + frame_size = ff_mpa_bitrate_tab[s->lsf][s->layer - 1][bitrate_index]; + s->bit_rate = frame_size * 1000; + switch(s->layer) { + case 1: + frame_size = (frame_size * 12000) / sample_rate; + frame_size = (frame_size + padding) * 4; + break; + case 2: + frame_size = (frame_size * 144000) / sample_rate; + frame_size += padding; + break; + default: + case 3: + frame_size = (frame_size * 144000) / (sample_rate << s->lsf); + frame_size += padding; + break; + } + s->frame_size = frame_size; + } else { + /* if no frame size computed, signal it */ + return 1; + } + +#if defined(DEBUG) + dprintf(s->avctx, "layer%d, %d Hz, %d kbits/s, ", + s->layer, s->sample_rate, s->bit_rate); + if (s->nb_channels == 2) { + if (s->layer == 3) { + if (s->mode_ext & MODE_EXT_MS_STEREO) + dprintf(s->avctx, "ms-"); + if (s->mode_ext & MODE_EXT_I_STEREO) + dprintf(s->avctx, "i-"); + } + dprintf(s->avctx, "stereo"); + } else { + dprintf(s->avctx, "mono"); + } + dprintf(s->avctx, "\n"); +#endif + return 0; +} diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodecheader.h b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodecheader.h new file mode 100644 index 0000000000..21e8cf016d --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodecheader.h @@ -0,0 +1,39 @@ +/* + * MPEG Audio header decoder + * Copyright (c) 2001, 2002 Fabrice Bellard. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file mpegaudiodecheader.c + * MPEG Audio header decoder. + */ + +#ifndef FFMPEG_MPEGAUDIODECHEADER_H +#define FFMPEG_MPEGAUDIODECHEADER_H + +#include "common.h" +#include "mpegaudio.h" + + +/* header decoding. MUST check the header before because no + consistency check is done there. 
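/*
 * Standalone restatement of the frame-size arithmetic in
 * ff_mpegaudio_decode_header() above, so the constants are easy to check:
 * for example layer 3, MPEG-1 (lsf = 0), 128 kbit/s at 44100 Hz with no
 * padding gives 128 * 144000 / 44100 = 417 bytes.  This helper is an
 * illustration, not part of the patch.
 */
static int mpa_frame_bytes(int layer, int bitrate_kbps, int sample_rate,
                           int lsf, int padding)
{
    switch (layer) {
    case 1:  /* layer 1 uses 4-byte slots */
        return (bitrate_kbps * 12000 / sample_rate + padding) * 4;
    case 2:
        return bitrate_kbps * 144000 / sample_rate + padding;
    default: /* layer 3: half-length frames for the low sampling frequencies */
        return bitrate_kbps * 144000 / (sample_rate << lsf) + padding;
    }
}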
Return 1 if free format found and + that the frame size must be computed externally */ +int ff_mpegaudio_decode_header(MPADecodeContext *s, uint32_t header); + +#endif /* FFMPEG_MPEGAUDIODECHEADER_H */ diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodectab.h b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodectab.h index 8a13127ad9..a41ff7aaa8 100644 --- a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodectab.h +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiodectab.h @@ -1,204 +1,34 @@ -/** - * @file mpegaudiodectab.h - * mpeg audio layer decoder tables. +/* + * MPEG Audio decoder + * copyright (c) 2002 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -const uint16_t mpa_bitrate_tab[2][3][15] = { - { {0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448 }, - {0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384 }, - {0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320 } }, - { {0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256}, - {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160}, - {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160} - } -}; +/** + * @file mpegaudiodectab.h + * mpeg audio layer decoder tables. 
+ */ -const uint16_t mpa_freq_tab[3] = { 44100, 48000, 32000 }; +#ifndef FFMPEG_MPEGAUDIODECTAB_H +#define FFMPEG_MPEGAUDIODECTAB_H -/*******************************************************/ -/* half mpeg encoding window (full precision) */ -const int32_t mpa_enwindow[257] = { - 0, -1, -1, -1, -1, -1, -1, -2, - -2, -2, -2, -3, -3, -4, -4, -5, - -5, -6, -7, -7, -8, -9, -10, -11, - -13, -14, -16, -17, -19, -21, -24, -26, - -29, -31, -35, -38, -41, -45, -49, -53, - -58, -63, -68, -73, -79, -85, -91, -97, - -104, -111, -117, -125, -132, -139, -147, -154, - -161, -169, -176, -183, -190, -196, -202, -208, - 213, 218, 222, 225, 227, 228, 228, 227, - 224, 221, 215, 208, 200, 189, 177, 163, - 146, 127, 106, 83, 57, 29, -2, -36, - -72, -111, -153, -197, -244, -294, -347, -401, - -459, -519, -581, -645, -711, -779, -848, -919, - -991, -1064, -1137, -1210, -1283, -1356, -1428, -1498, - -1567, -1634, -1698, -1759, -1817, -1870, -1919, -1962, - -2001, -2032, -2057, -2075, -2085, -2087, -2080, -2063, - 2037, 2000, 1952, 1893, 1822, 1739, 1644, 1535, - 1414, 1280, 1131, 970, 794, 605, 402, 185, - -45, -288, -545, -814, -1095, -1388, -1692, -2006, - -2330, -2663, -3004, -3351, -3705, -4063, -4425, -4788, - -5153, -5517, -5879, -6237, -6589, -6935, -7271, -7597, - -7910, -8209, -8491, -8755, -8998, -9219, -9416, -9585, - -9727, -9838, -9916, -9959, -9966, -9935, -9863, -9750, - -9592, -9389, -9139, -8840, -8492, -8092, -7640, -7134, - 6574, 5959, 5288, 4561, 3776, 2935, 2037, 1082, - 70, -998, -2122, -3300, -4533, -5818, -7154, -8540, - -9975,-11455,-12980,-14548,-16155,-17799,-19478,-21189, --22929,-24694,-26482,-28289,-30112,-31947,-33791,-35640, --37489,-39336,-41176,-43006,-44821,-46617,-48390,-50137, --51853,-53534,-55178,-56778,-58333,-59838,-61289,-62684, --64019,-65290,-66494,-67629,-68692,-69679,-70590,-71420, --72169,-72835,-73415,-73908,-74313,-74630,-74856,-74992, - 75038, -}; - -/*******************************************************/ -/* layer 2 tables */ - -const int sblimit_table[5] = { 27 , 30 , 8, 12 , 30 }; - -const int quant_steps[17] = { - 3, 5, 7, 9, 15, - 31, 63, 127, 255, 511, - 1023, 2047, 4095, 8191, 16383, - 32767, 65535 -}; - -/* we use a negative value if grouped */ -const int quant_bits[17] = { - -5, -7, 3, -10, 4, - 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, - 15, 16 -}; - -/* encoding tables which give the quantization index. Note how it is - possible to store them efficiently ! 
*/ -static const unsigned char alloc_table_0[] = { - 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 2, 0, 1, 16, - 2, 0, 1, 16, - 2, 0, 1, 16, - 2, 0, 1, 16, -}; - -static const unsigned char alloc_table_1[] = { - 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 3, 0, 1, 2, 3, 4, 5, 16, - 2, 0, 1, 16, - 2, 0, 1, 16, - 2, 0, 1, 16, - 2, 0, 1, 16, - 2, 0, 1, 16, - 2, 0, 1, 16, - 2, 0, 1, 16, -}; - -static const unsigned char alloc_table_2[] = { - 4, 0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 4, 0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, -}; - -static const unsigned char alloc_table_3[] = { - 4, 0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 4, 0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, -}; - -static const unsigned char alloc_table_4[] = { - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 3, 0, 1, 3, 4, 5, 6, 7, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, 
- 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, - 2, 0, 1, 3, -}; - -const unsigned char *alloc_tables[5] = -{ alloc_table_0, alloc_table_1, alloc_table_2, alloc_table_3, alloc_table_4, }; +#include +#include "mpegaudio.h" /*******************************************************/ /* layer 3 tables */ @@ -214,62 +44,62 @@ static const uint8_t lsf_nsf_table[6][3][4] = { { { 6, 5, 5, 5 }, { 9, 9, 9, 9 }, { 6, 9, 9, 9 } }, { { 6, 5, 7, 3 }, { 9, 9, 12, 6 }, { 6, 9, 12, 6 } }, { { 11, 10, 0, 0 }, { 18, 18, 0, 0 }, { 15, 18, 0, 0 } }, - { { 7, 7, 7, 0 }, { 12, 12, 12, 0 }, { 6, 15, 12, 0 } }, + { { 7, 7, 7, 0 }, { 12, 12, 12, 0 }, { 6, 15, 12, 0 } }, { { 6, 6, 6, 3 }, { 12, 9, 9, 6 }, { 6, 12, 9, 6 } }, { { 8, 8, 5, 0 }, { 15, 12, 9, 0 }, { 6, 18, 9, 0 } }, }; /* mpegaudio layer 3 huffman tables */ -const uint16_t mpa_huffcodes_1[4] = { +static const uint16_t mpa_huffcodes_1[4] = { 0x0001, 0x0001, 0x0001, 0x0000, }; -const uint8_t mpa_huffbits_1[4] = { +static const uint8_t mpa_huffbits_1[4] = { 1, 3, 2, 3, }; -const uint16_t mpa_huffcodes_2[9] = { +static const uint16_t mpa_huffcodes_2[9] = { 0x0001, 0x0002, 0x0001, 0x0003, 0x0001, 0x0001, 0x0003, 0x0002, 0x0000, }; -const uint8_t mpa_huffbits_2[9] = { +static const uint8_t mpa_huffbits_2[9] = { 1, 3, 6, 3, 3, 5, 5, 5, 6, }; -const uint16_t mpa_huffcodes_3[9] = { +static const uint16_t mpa_huffcodes_3[9] = { 0x0003, 0x0002, 0x0001, 0x0001, 0x0001, 0x0001, 0x0003, 0x0002, 0x0000, }; -const uint8_t mpa_huffbits_3[9] = { +static const uint8_t mpa_huffbits_3[9] = { 2, 2, 6, 3, 2, 5, 5, 5, 6, }; -const uint16_t mpa_huffcodes_5[16] = { +static const uint16_t mpa_huffcodes_5[16] = { 0x0001, 0x0002, 0x0006, 0x0005, 0x0003, 0x0001, 0x0004, 0x0004, 0x0007, 0x0005, 0x0007, 0x0001, 0x0006, 0x0001, 0x0001, 0x0000, }; -const uint8_t mpa_huffbits_5[16] = { +static const uint8_t mpa_huffbits_5[16] = { 1, 3, 6, 7, 3, 3, 6, 7, 6, 6, 7, 8, 7, 6, 7, 8, }; -const uint16_t mpa_huffcodes_6[16] = { +static const uint16_t mpa_huffcodes_6[16] = { 0x0007, 0x0003, 0x0005, 0x0001, 0x0006, 0x0002, 0x0003, 0x0002, 0x0005, 0x0004, 0x0004, 0x0001, 0x0003, 0x0003, 0x0002, 0x0000, }; -const uint8_t mpa_huffbits_6[16] = { +static const uint8_t mpa_huffbits_6[16] = { 3, 3, 5, 7, 3, 2, 4, 5, 4, 4, 5, 6, 6, 5, 6, 7, }; -const uint16_t mpa_huffcodes_7[36] = { +static const uint16_t mpa_huffcodes_7[36] = { 0x0001, 0x0002, 0x000a, 0x0013, 0x0010, 0x000a, 0x0003, 0x0003, 0x0007, 0x000a, 0x0005, 0x0003, 0x000b, 0x0004, 0x000d, 0x0011, 0x0008, 0x0004, 0x000c, 0x000b, 0x0012, 0x000f, 0x000b, 0x0002, @@ -277,7 +107,7 @@ const uint16_t mpa_huffcodes_7[36] = { 0x0005, 0x0003, 0x0002, 0x0000, }; -const uint8_t mpa_huffbits_7[36] = { +static const uint8_t mpa_huffbits_7[36] = { 1, 3, 6, 8, 8, 9, 3, 4, 6, 7, 7, 8, 6, 5, 7, 8, 8, 9, 7, 7, 8, 9, 9, 9, @@ -285,7 +115,7 @@ const uint8_t mpa_huffbits_7[36] = { 9, 10, 10, 10, }; -const uint16_t mpa_huffcodes_8[36] = { +static const uint16_t mpa_huffcodes_8[36] = { 0x0003, 0x0004, 0x0006, 0x0012, 0x000c, 0x0005, 0x0005, 0x0001, 0x0002, 0x0010, 0x0009, 0x0003, 0x0007, 0x0003, 0x0005, 0x000e, 0x0007, 0x0003, 0x0013, 0x0011, 0x000f, 0x000d, 0x000a, 0x0004, @@ -293,7 +123,7 @@ const uint16_t mpa_huffcodes_8[36] = { 0x0004, 0x0001, 0x0001, 0x0000, }; -const uint8_t mpa_huffbits_8[36] = { +static const uint8_t mpa_huffbits_8[36] = { 2, 3, 6, 8, 8, 9, 3, 2, 4, 8, 8, 8, 6, 4, 6, 8, 8, 9, 8, 8, 8, 9, 9, 10, @@ -301,7 +131,7 @@ const uint8_t mpa_huffbits_8[36] = { 9, 9, 11, 11, }; -const uint16_t mpa_huffcodes_9[36] = { +static const uint16_t mpa_huffcodes_9[36] = { 
0x0007, 0x0005, 0x0009, 0x000e, 0x000f, 0x0007, 0x0006, 0x0004, 0x0005, 0x0005, 0x0006, 0x0007, 0x0007, 0x0006, 0x0008, 0x0008, 0x0008, 0x0005, 0x000f, 0x0006, 0x0009, 0x000a, 0x0005, 0x0001, @@ -309,7 +139,7 @@ const uint16_t mpa_huffcodes_9[36] = { 0x0006, 0x0002, 0x0006, 0x0000, }; -const uint8_t mpa_huffbits_9[36] = { +static const uint8_t mpa_huffbits_9[36] = { 3, 3, 5, 6, 8, 9, 3, 3, 4, 5, 6, 8, 4, 4, 5, 6, 7, 8, 6, 5, 6, 7, 7, 8, @@ -317,7 +147,7 @@ const uint8_t mpa_huffbits_9[36] = { 8, 8, 9, 9, }; -const uint16_t mpa_huffcodes_10[64] = { +static const uint16_t mpa_huffcodes_10[64] = { 0x0001, 0x0002, 0x000a, 0x0017, 0x0023, 0x001e, 0x000c, 0x0011, 0x0003, 0x0003, 0x0008, 0x000c, 0x0012, 0x0015, 0x000c, 0x0007, 0x000b, 0x0009, 0x000f, 0x0015, 0x0020, 0x0028, 0x0013, 0x0006, @@ -328,7 +158,7 @@ const uint16_t mpa_huffcodes_10[64] = { 0x0009, 0x0008, 0x0007, 0x0008, 0x0004, 0x0004, 0x0002, 0x0000, }; -const uint8_t mpa_huffbits_10[64] = { +static const uint8_t mpa_huffbits_10[64] = { 1, 3, 6, 8, 9, 9, 9, 10, 3, 4, 6, 7, 8, 9, 8, 8, 6, 6, 7, 8, 9, 10, 9, 9, @@ -339,7 +169,7 @@ const uint8_t mpa_huffbits_10[64] = { 9, 8, 9, 10, 10, 11, 11, 11, }; -const uint16_t mpa_huffcodes_11[64] = { +static const uint16_t mpa_huffcodes_11[64] = { 0x0003, 0x0004, 0x000a, 0x0018, 0x0022, 0x0021, 0x0015, 0x000f, 0x0005, 0x0003, 0x0004, 0x000a, 0x0020, 0x0011, 0x000b, 0x000a, 0x000b, 0x0007, 0x000d, 0x0012, 0x001e, 0x001f, 0x0014, 0x0005, @@ -350,7 +180,7 @@ const uint16_t mpa_huffcodes_11[64] = { 0x000b, 0x0004, 0x0006, 0x0006, 0x0006, 0x0003, 0x0002, 0x0000, }; -const uint8_t mpa_huffbits_11[64] = { +static const uint8_t mpa_huffbits_11[64] = { 2, 3, 5, 7, 8, 9, 8, 9, 3, 3, 4, 6, 8, 8, 7, 8, 5, 5, 6, 7, 8, 9, 8, 8, @@ -361,7 +191,7 @@ const uint8_t mpa_huffbits_11[64] = { 8, 7, 8, 9, 10, 10, 10, 10, }; -const uint16_t mpa_huffcodes_12[64] = { +static const uint16_t mpa_huffcodes_12[64] = { 0x0009, 0x0006, 0x0010, 0x0021, 0x0029, 0x0027, 0x0026, 0x001a, 0x0007, 0x0005, 0x0006, 0x0009, 0x0017, 0x0010, 0x001a, 0x000b, 0x0011, 0x0007, 0x000b, 0x000e, 0x0015, 0x001e, 0x000a, 0x0007, @@ -372,7 +202,7 @@ const uint16_t mpa_huffcodes_12[64] = { 0x001b, 0x000c, 0x0008, 0x000c, 0x0006, 0x0003, 0x0001, 0x0000, }; -const uint8_t mpa_huffbits_12[64] = { +static const uint8_t mpa_huffbits_12[64] = { 4, 3, 5, 7, 8, 9, 9, 9, 3, 3, 4, 5, 7, 7, 8, 8, 5, 4, 5, 6, 7, 8, 7, 8, @@ -383,7 +213,7 @@ const uint8_t mpa_huffbits_12[64] = { 9, 8, 8, 9, 9, 9, 9, 10, }; -const uint16_t mpa_huffcodes_13[256] = { +static const uint16_t mpa_huffcodes_13[256] = { 0x0001, 0x0005, 0x000e, 0x0015, 0x0022, 0x0033, 0x002e, 0x0047, 0x002a, 0x0034, 0x0044, 0x0034, 0x0043, 0x002c, 0x002b, 0x0013, 0x0003, 0x0004, 0x000c, 0x0013, 0x001f, 0x001a, 0x002c, 0x0021, @@ -418,7 +248,7 @@ const uint16_t mpa_huffcodes_13[256] = { 0x0011, 0x000c, 0x0010, 0x0008, 0x0001, 0x0001, 0x0000, 0x0001, }; -const uint8_t mpa_huffbits_13[256] = { +static const uint8_t mpa_huffbits_13[256] = { 1, 4, 6, 7, 8, 9, 9, 10, 9, 10, 11, 11, 12, 12, 13, 13, 3, 4, 6, 7, 8, 8, 9, 9, @@ -453,7 +283,7 @@ const uint8_t mpa_huffbits_13[256] = { 15, 15, 16, 16, 19, 18, 19, 16, }; -const uint16_t mpa_huffcodes_15[256] = { +static const uint16_t mpa_huffcodes_15[256] = { 0x0007, 0x000c, 0x0012, 0x0035, 0x002f, 0x004c, 0x007c, 0x006c, 0x0059, 0x007b, 0x006c, 0x0077, 0x006b, 0x0051, 0x007a, 0x003f, 0x000d, 0x0005, 0x0010, 0x001b, 0x002e, 0x0024, 0x003d, 0x0033, @@ -488,7 +318,7 @@ const uint16_t mpa_huffcodes_15[256] = { 0x0015, 0x0010, 0x000a, 0x0006, 0x0008, 0x0006, 0x0002, 0x0000, 
}; -const uint8_t mpa_huffbits_15[256] = { +static const uint8_t mpa_huffbits_15[256] = { 3, 4, 5, 7, 7, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12, 13, 4, 3, 5, 6, 7, 7, 8, 8, @@ -523,7 +353,7 @@ const uint8_t mpa_huffbits_15[256] = { 12, 12, 12, 12, 13, 13, 13, 13, }; -const uint16_t mpa_huffcodes_16[256] = { +static const uint16_t mpa_huffcodes_16[256] = { 0x0001, 0x0005, 0x000e, 0x002c, 0x004a, 0x003f, 0x006e, 0x005d, 0x00ac, 0x0095, 0x008a, 0x00f2, 0x00e1, 0x00c3, 0x0178, 0x0011, 0x0003, 0x0004, 0x000c, 0x0014, 0x0023, 0x003e, 0x0035, 0x002f, @@ -558,7 +388,7 @@ const uint16_t mpa_huffcodes_16[256] = { 0x000d, 0x000c, 0x000a, 0x0007, 0x0005, 0x0003, 0x0001, 0x0003, }; -const uint8_t mpa_huffbits_16[256] = { +static const uint8_t mpa_huffbits_16[256] = { 1, 4, 6, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 12, 13, 9, 3, 4, 6, 7, 8, 9, 9, 9, @@ -593,7 +423,7 @@ const uint8_t mpa_huffbits_16[256] = { 11, 11, 11, 11, 11, 11, 11, 8, }; -const uint16_t mpa_huffcodes_24[256] = { +static const uint16_t mpa_huffcodes_24[256] = { 0x000f, 0x000d, 0x002e, 0x0050, 0x0092, 0x0106, 0x00f8, 0x01b2, 0x01aa, 0x029d, 0x028d, 0x0289, 0x026d, 0x0205, 0x0408, 0x0058, 0x000e, 0x000c, 0x0015, 0x0026, 0x0047, 0x0082, 0x007a, 0x00d8, @@ -628,7 +458,7 @@ const uint16_t mpa_huffcodes_24[256] = { 0x0007, 0x0006, 0x0004, 0x0007, 0x0005, 0x0003, 0x0001, 0x0003, }; -const uint8_t mpa_huffbits_24[256] = { +static const uint8_t mpa_huffbits_24[256] = { 4, 4, 6, 7, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 12, 9, 4, 4, 5, 6, 7, 8, 8, 9, @@ -663,7 +493,7 @@ const uint8_t mpa_huffbits_24[256] = { 7, 7, 7, 8, 8, 8, 8, 4, }; -const HuffTable mpa_huff_tables[16] = { +static const HuffTable mpa_huff_tables[16] = { { 1, NULL, NULL }, { 2, mpa_huffbits_1, mpa_huffcodes_1 }, { 3, mpa_huffbits_2, mpa_huffcodes_2 }, @@ -682,7 +512,7 @@ const HuffTable mpa_huff_tables[16] = { { 16, mpa_huffbits_24, mpa_huffcodes_24 }, }; -const uint8_t mpa_huff_data[32][2] = { +static const uint8_t mpa_huff_data[32][2] = { { 0, 0 }, { 1, 0 }, { 2, 0 }, @@ -719,18 +549,18 @@ const uint8_t mpa_huff_data[32][2] = { /* huffman tables for quadrules */ -static uint8_t mpa_quad_codes[2][16] = { +static const uint8_t mpa_quad_codes[2][16] = { { 1, 5, 4, 5, 6, 5, 4, 4, 7, 3, 6, 0, 7, 2, 3, 1, }, { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, }, }; -static uint8_t mpa_quad_bits[2][16] = { +static const uint8_t mpa_quad_bits[2][16] = { { 1, 4, 4, 5, 4, 6, 5, 6, 4, 5, 5, 6, 5, 6, 6, 6, }, { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, }, }; /* band size tables */ -const uint8_t band_size_long[9][22] = { +static const uint8_t band_size_long[9][22] = { { 4, 4, 4, 4, 4, 4, 6, 6, 8, 8, 10, 12, 16, 20, 24, 28, 34, 42, 50, 54, 76, 158, }, /* 44100 */ { 4, 4, 4, 4, 4, 4, 6, 6, 6, 8, 10, @@ -751,7 +581,7 @@ const uint8_t band_size_long[9][22] = { 40, 48, 56, 64, 76, 90, 2, 2, 2, 2, 2, }, /* 8000 */ }; -const uint8_t band_size_short[9][13] = { +static const uint8_t band_size_short[9][13] = { { 4, 4, 4, 4, 6, 8, 10, 12, 14, 18, 22, 30, 56, }, /* 44100 */ { 4, 4, 4, 4, 6, 6, 10, 12, 14, 16, 20, 26, 66, }, /* 48000 */ { 4, 4, 4, 4, 6, 8, 12, 16, 20, 26, 34, 42, 12, }, /* 32000 */ @@ -763,12 +593,14 @@ const uint8_t band_size_short[9][13] = { { 8, 8, 8, 12, 16, 20, 24, 28, 36, 2, 2, 2, 26, }, /* 8000 */ }; -const uint8_t mpa_pretab[2][22] = { +static const uint8_t mpa_pretab[2][22] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 3, 2, 0 }, }; /* table for alias reduction (XXX: store it as integer 
!) */ -const float ci_table[8] = { +static const float ci_table[8] = { -0.6, -0.535, -0.33, -0.185, -0.095, -0.041, -0.0142, -0.0037, }; + +#endif /* FFMPEG_MPEGAUDIODECTAB_H */ diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudioenc.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudioenc.c new file mode 100644 index 0000000000..c061d7f5cf --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudioenc.c @@ -0,0 +1,803 @@ +/* + * The simplest mpeg audio layer 2 encoder + * Copyright (c) 2000, 2001 Fabrice Bellard. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file mpegaudio.c + * The simplest mpeg audio layer 2 encoder. + */ + +#include "avcodec.h" +#include "bitstream.h" +#include "mpegaudio.h" + +/* currently, cannot change these constants (need to modify + quantization stage) */ +#define MUL(a,b) (((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS) + +#define SAMPLES_BUF_SIZE 4096 + +typedef struct MpegAudioContext { + PutBitContext pb; + int nb_channels; + int freq, bit_rate; + int lsf; /* 1 if mpeg2 low bitrate selected */ + int bitrate_index; /* bit rate */ + int freq_index; + int frame_size; /* frame size, in bits, without padding */ + int64_t nb_samples; /* total number of samples encoded */ + /* padding computation */ + int frame_frac, frame_frac_incr, do_padding; + short samples_buf[MPA_MAX_CHANNELS][SAMPLES_BUF_SIZE]; /* buffer for filter */ + int samples_offset[MPA_MAX_CHANNELS]; /* offset in samples_buf */ + int sb_samples[MPA_MAX_CHANNELS][3][12][SBLIMIT]; + unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3]; /* scale factors */ + /* code to group 3 scale factors */ + unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT]; + int sblimit; /* number of used subbands */ + const unsigned char *alloc_table; +} MpegAudioContext; + +/* define it to use floats in quantization (I don't like floats !) 
*/ +//#define USE_FLOATS + +#include "mpegaudiodata.h" +#include "mpegaudiotab.h" + +static av_cold int MPA_encode_init(AVCodecContext *avctx) +{ + MpegAudioContext *s = avctx->priv_data; + int freq = avctx->sample_rate; + int bitrate = avctx->bit_rate; + int channels = avctx->channels; + int i, v, table; + float a; + + if (channels <= 0 || channels > 2){ + av_log(avctx, AV_LOG_ERROR, "encoding %d channel(s) is not allowed in mp2\n", channels); + return -1; + } + bitrate = bitrate / 1000; + s->nb_channels = channels; + s->freq = freq; + s->bit_rate = bitrate * 1000; + avctx->frame_size = MPA_FRAME_SIZE; + + /* encoding freq */ + s->lsf = 0; + for(i=0;i<3;i++) { + if (ff_mpa_freq_tab[i] == freq) + break; + if ((ff_mpa_freq_tab[i] / 2) == freq) { + s->lsf = 1; + break; + } + } + if (i == 3){ + av_log(avctx, AV_LOG_ERROR, "Sampling rate %d is not allowed in mp2\n", freq); + return -1; + } + s->freq_index = i; + + /* encoding bitrate & frequency */ + for(i=0;i<15;i++) { + if (ff_mpa_bitrate_tab[s->lsf][1][i] == bitrate) + break; + } + if (i == 15){ + av_log(avctx, AV_LOG_ERROR, "bitrate %d is not allowed in mp2\n", bitrate); + return -1; + } + s->bitrate_index = i; + + /* compute total header size & pad bit */ + + a = (float)(bitrate * 1000 * MPA_FRAME_SIZE) / (freq * 8.0); + s->frame_size = ((int)a) * 8; + + /* frame fractional size to compute padding */ + s->frame_frac = 0; + s->frame_frac_incr = (int)((a - floor(a)) * 65536.0); + + /* select the right allocation table */ + table = ff_mpa_l2_select_table(bitrate, s->nb_channels, freq, s->lsf); + + /* number of used subbands */ + s->sblimit = ff_mpa_sblimit_table[table]; + s->alloc_table = ff_mpa_alloc_tables[table]; + +#ifdef DEBUG + av_log(avctx, AV_LOG_DEBUG, "%d kb/s, %d Hz, frame_size=%d bits, table=%d, padincr=%x\n", + bitrate, freq, s->frame_size, table, s->frame_frac_incr); +#endif + + for(i=0;inb_channels;i++) + s->samples_offset[i] = 0; + + for(i=0;i<257;i++) { + int v; + v = ff_mpa_enwindow[i]; +#if WFRAC_BITS != 16 + v = (v + (1 << (16 - WFRAC_BITS - 1))) >> (16 - WFRAC_BITS); +#endif + filter_bank[i] = v; + if ((i & 63) != 0) + v = -v; + if (i != 0) + filter_bank[512 - i] = v; + } + + for(i=0;i<64;i++) { + v = (int)(pow(2.0, (3 - i) / 3.0) * (1 << 20)); + if (v <= 0) + v = 1; + scale_factor_table[i] = v; +#ifdef USE_FLOATS + scale_factor_inv_table[i] = pow(2.0, -(3 - i) / 3.0) / (float)(1 << 20); +#else +#define P 15 + scale_factor_shift[i] = 21 - P - (i / 3); + scale_factor_mult[i] = (1 << P) * pow(2.0, (i % 3) / 3.0); +#endif + } + for(i=0;i<128;i++) { + v = i - 64; + if (v <= -3) + v = 0; + else if (v < 0) + v = 1; + else if (v == 0) + v = 2; + else if (v < 3) + v = 3; + else + v = 4; + scale_diff_table[i] = v; + } + + for(i=0;i<17;i++) { + v = ff_mpa_quant_bits[i]; + if (v < 0) + v = -v; + else + v = v * 3; + total_quant_bits[i] = 12 * v; + } + + avctx->coded_frame= avcodec_alloc_frame(); + avctx->coded_frame->key_frame= 1; + + return 0; +} + +/* 32 point floating point IDCT without 1/sqrt(2) coef zero scaling */ +static void idct32(int *out, int *tab) +{ + int i, j; + int *t, *t1, xr; + const int *xp = costab32; + + for(j=31;j>=3;j-=2) tab[j] += tab[j - 2]; + + t = tab + 30; + t1 = tab + 2; + do { + t[0] += t[-4]; + t[1] += t[1 - 4]; + t -= 4; + } while (t != t1); + + t = tab + 28; + t1 = tab + 4; + do { + t[0] += t[-8]; + t[1] += t[1-8]; + t[2] += t[2-8]; + t[3] += t[3-8]; + t -= 8; + } while (t != t1); + + t = tab; + t1 = tab + 32; + do { + t[ 3] = -t[ 3]; + t[ 6] = -t[ 6]; + + t[11] = -t[11]; + t[12] = -t[12]; + t[13] = 
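/*
 * Minimal sketch of the padding scheme set up in MPA_encode_init() above:
 * the fractional part of the exact frame length is kept in a 16.16
 * fixed-point accumulator (frame_frac_incr), and compute_bit_allocation()
 * adds it once per frame, emitting one extra padding slot whenever the
 * accumulator wraps.  The loop below shows that drip-feed on its own, with
 * illustrative names and a hard-coded 1152 samples per frame (layer 2).
 */
#include <math.h>
#include <stdio.h>

static void show_padding_pattern(int bit_rate, int sample_rate, int frames)
{
    double exact = (double)bit_rate * 1152 / (sample_rate * 8.0); /* bytes */
    int base = (int)exact;                          /* whole bytes per frame */
    int incr = (int)((exact - floor(exact)) * 65536.0);
    int frac = 0, i;

    for (i = 0; i < frames; i++) {
        int padded = 0;
        frac += incr;
        if (frac >= 65536) {                        /* accumulator wrapped   */
            frac -= 65536;
            padded = 1;                             /* one extra padding slot */
        }
        printf("frame %d: %d bytes\n", i, base + padded);
    }
}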
-t[13]; + t[15] = -t[15]; + t += 16; + } while (t != t1); + + + t = tab; + t1 = tab + 8; + do { + int x1, x2, x3, x4; + + x3 = MUL(t[16], FIX(SQRT2*0.5)); + x4 = t[0] - x3; + x3 = t[0] + x3; + + x2 = MUL(-(t[24] + t[8]), FIX(SQRT2*0.5)); + x1 = MUL((t[8] - x2), xp[0]); + x2 = MUL((t[8] + x2), xp[1]); + + t[ 0] = x3 + x1; + t[ 8] = x4 - x2; + t[16] = x4 + x2; + t[24] = x3 - x1; + t++; + } while (t != t1); + + xp += 2; + t = tab; + t1 = tab + 4; + do { + xr = MUL(t[28],xp[0]); + t[28] = (t[0] - xr); + t[0] = (t[0] + xr); + + xr = MUL(t[4],xp[1]); + t[ 4] = (t[24] - xr); + t[24] = (t[24] + xr); + + xr = MUL(t[20],xp[2]); + t[20] = (t[8] - xr); + t[ 8] = (t[8] + xr); + + xr = MUL(t[12],xp[3]); + t[12] = (t[16] - xr); + t[16] = (t[16] + xr); + t++; + } while (t != t1); + xp += 4; + + for (i = 0; i < 4; i++) { + xr = MUL(tab[30-i*4],xp[0]); + tab[30-i*4] = (tab[i*4] - xr); + tab[ i*4] = (tab[i*4] + xr); + + xr = MUL(tab[ 2+i*4],xp[1]); + tab[ 2+i*4] = (tab[28-i*4] - xr); + tab[28-i*4] = (tab[28-i*4] + xr); + + xr = MUL(tab[31-i*4],xp[0]); + tab[31-i*4] = (tab[1+i*4] - xr); + tab[ 1+i*4] = (tab[1+i*4] + xr); + + xr = MUL(tab[ 3+i*4],xp[1]); + tab[ 3+i*4] = (tab[29-i*4] - xr); + tab[29-i*4] = (tab[29-i*4] + xr); + + xp += 2; + } + + t = tab + 30; + t1 = tab + 1; + do { + xr = MUL(t1[0], *xp); + t1[0] = (t[0] - xr); + t[0] = (t[0] + xr); + t -= 2; + t1 += 2; + xp++; + } while (t >= tab); + + for(i=0;i<32;i++) { + out[i] = tab[bitinv32[i]]; + } +} + +#define WSHIFT (WFRAC_BITS + 15 - FRAC_BITS) + +static void filter(MpegAudioContext *s, int ch, short *samples, int incr) +{ + short *p, *q; + int sum, offset, i, j; + int tmp[64]; + int tmp1[32]; + int *out; + + // print_pow1(samples, 1152); + + offset = s->samples_offset[ch]; + out = &s->sb_samples[ch][0][0][0]; + for(j=0;j<36;j++) { + /* 32 samples at once */ + for(i=0;i<32;i++) { + s->samples_buf[ch][offset + (31 - i)] = samples[0]; + samples += incr; + } + + /* filter */ + p = s->samples_buf[ch] + offset; + q = filter_bank; + /* maxsum = 23169 */ + for(i=0;i<64;i++) { + sum = p[0*64] * q[0*64]; + sum += p[1*64] * q[1*64]; + sum += p[2*64] * q[2*64]; + sum += p[3*64] * q[3*64]; + sum += p[4*64] * q[4*64]; + sum += p[5*64] * q[5*64]; + sum += p[6*64] * q[6*64]; + sum += p[7*64] * q[7*64]; + tmp[i] = sum; + p++; + q++; + } + tmp1[0] = tmp[16] >> WSHIFT; + for( i=1; i<=16; i++ ) tmp1[i] = (tmp[i+16]+tmp[16-i]) >> WSHIFT; + for( i=17; i<=31; i++ ) tmp1[i] = (tmp[i+16]-tmp[80-i]) >> WSHIFT; + + idct32(out, tmp1); + + /* advance of 32 samples */ + offset -= 32; + out += 32; + /* handle the wrap around */ + if (offset < 0) { + memmove(s->samples_buf[ch] + SAMPLES_BUF_SIZE - (512 - 32), + s->samples_buf[ch], (512 - 32) * 2); + offset = SAMPLES_BUF_SIZE - 512; + } + } + s->samples_offset[ch] = offset; + + // print_pow(s->sb_samples, 1152); +} + +static void compute_scale_factors(unsigned char scale_code[SBLIMIT], + unsigned char scale_factors[SBLIMIT][3], + int sb_samples[3][12][SBLIMIT], + int sblimit) +{ + int *p, vmax, v, n, i, j, k, code; + int index, d1, d2; + unsigned char *sf = &scale_factors[0][0]; + + for(j=0;j vmax) + vmax = v; + } + /* compute the scale factor index using log 2 computations */ + if (vmax > 1) { + n = av_log2(vmax); + /* n is the position of the MSB of vmax. 
now + use at most 2 compares to find the index */ + index = (21 - n) * 3 - 3; + if (index >= 0) { + while (vmax <= scale_factor_table[index+1]) + index++; + } else { + index = 0; /* very unlikely case of overflow */ + } + } else { + index = 62; /* value 63 is not allowed */ + } + +#if 0 + printf("%2d:%d in=%x %x %d\n", + j, i, vmax, scale_factor_table[index], index); +#endif + /* store the scale factor */ + assert(index >=0 && index <= 63); + sf[i] = index; + } + + /* compute the transmission factor : look if the scale factors + are close enough to each other */ + d1 = scale_diff_table[sf[0] - sf[1] + 64]; + d2 = scale_diff_table[sf[1] - sf[2] + 64]; + + /* handle the 25 cases */ + switch(d1 * 5 + d2) { + case 0*5+0: + case 0*5+4: + case 3*5+4: + case 4*5+0: + case 4*5+4: + code = 0; + break; + case 0*5+1: + case 0*5+2: + case 4*5+1: + case 4*5+2: + code = 3; + sf[2] = sf[1]; + break; + case 0*5+3: + case 4*5+3: + code = 3; + sf[1] = sf[2]; + break; + case 1*5+0: + case 1*5+4: + case 2*5+4: + code = 1; + sf[1] = sf[0]; + break; + case 1*5+1: + case 1*5+2: + case 2*5+0: + case 2*5+1: + case 2*5+2: + code = 2; + sf[1] = sf[2] = sf[0]; + break; + case 2*5+3: + case 3*5+3: + code = 2; + sf[0] = sf[1] = sf[2]; + break; + case 3*5+0: + case 3*5+1: + case 3*5+2: + code = 2; + sf[0] = sf[2] = sf[1]; + break; + case 1*5+3: + code = 2; + if (sf[0] > sf[2]) + sf[0] = sf[2]; + sf[1] = sf[2] = sf[0]; + break; + default: + assert(0); //cannot happen + code = 0; /* kill warning */ + } + +#if 0 + printf("%d: %2d %2d %2d %d %d -> %d\n", j, + sf[0], sf[1], sf[2], d1, d2, code); +#endif + scale_code[j] = code; + sf += 3; + } +} + +/* The most important function : psycho acoustic module. In this + encoder there is basically none, so this is the worst you can do, + but also this is the simpler. */ +static void psycho_acoustic_model(MpegAudioContext *s, short smr[SBLIMIT]) +{ + int i; + + for(i=0;isblimit;i++) { + smr[i] = (int)(fixed_smr[i] * 10); + } +} + + +#define SB_NOTALLOCATED 0 +#define SB_ALLOCATED 1 +#define SB_NOMORE 2 + +/* Try to maximize the smr while using a number of bits inferior to + the frame size. 
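/*
 * Simplified sketch of the scale-factor search in compute_scale_factors()
 * above: the peak magnitude of 12 subband samples is mapped to the largest
 * index i with pow(2, (3 - i) / 3) * 2^20 still >= vmax, the real code just
 * gets there faster via the MSB position.  This linear version is for
 * clarity only and assumes FRAC_BITS == 20.
 */
#include <math.h>

static int scalefactor_index(int vmax)      /* vmax = peak |subband sample| */
{
    int i;
    if (vmax <= 1)
        return 62;                          /* value 63 is not allowed */
    for (i = 0; i < 62; i++) {
        int next = (int)(pow(2.0, (3 - (i + 1)) / 3.0) * (1 << 20));
        if (vmax > next)                    /* table[i] >= vmax > table[i+1] */
            return i;
    }
    return 62;
}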
I tried to make the code simpler, faster and + smaller than other encoders :-) */ +static void compute_bit_allocation(MpegAudioContext *s, + short smr1[MPA_MAX_CHANNELS][SBLIMIT], + unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT], + int *padding) +{ + int i, ch, b, max_smr, max_ch, max_sb, current_frame_size, max_frame_size; + int incr; + short smr[MPA_MAX_CHANNELS][SBLIMIT]; + unsigned char subband_status[MPA_MAX_CHANNELS][SBLIMIT]; + const unsigned char *alloc; + + memcpy(smr, smr1, s->nb_channels * sizeof(short) * SBLIMIT); + memset(subband_status, SB_NOTALLOCATED, s->nb_channels * SBLIMIT); + memset(bit_alloc, 0, s->nb_channels * SBLIMIT); + + /* compute frame size and padding */ + max_frame_size = s->frame_size; + s->frame_frac += s->frame_frac_incr; + if (s->frame_frac >= 65536) { + s->frame_frac -= 65536; + s->do_padding = 1; + max_frame_size += 8; + } else { + s->do_padding = 0; + } + + /* compute the header + bit alloc size */ + current_frame_size = 32; + alloc = s->alloc_table; + for(i=0;isblimit;i++) { + incr = alloc[0]; + current_frame_size += incr * s->nb_channels; + alloc += 1 << incr; + } + for(;;) { + /* look for the subband with the largest signal to mask ratio */ + max_sb = -1; + max_ch = -1; + max_smr = INT_MIN; + for(ch=0;chnb_channels;ch++) { + for(i=0;isblimit;i++) { + if (smr[ch][i] > max_smr && subband_status[ch][i] != SB_NOMORE) { + max_smr = smr[ch][i]; + max_sb = i; + max_ch = ch; + } + } + } +#if 0 + printf("current=%d max=%d max_sb=%d alloc=%d\n", + current_frame_size, max_frame_size, max_sb, + bit_alloc[max_sb]); +#endif + if (max_sb < 0) + break; + + /* find alloc table entry (XXX: not optimal, should use + pointer table) */ + alloc = s->alloc_table; + for(i=0;iscale_code[max_ch][max_sb]] * 6; + incr += total_quant_bits[alloc[1]]; + } else { + /* increments bit allocation */ + b = bit_alloc[max_ch][max_sb]; + incr = total_quant_bits[alloc[b + 1]] - + total_quant_bits[alloc[b]]; + } + + if (current_frame_size + incr <= max_frame_size) { + /* can increase size */ + b = ++bit_alloc[max_ch][max_sb]; + current_frame_size += incr; + /* decrease smr by the resolution we added */ + smr[max_ch][max_sb] = smr1[max_ch][max_sb] - quant_snr[alloc[b]]; + /* max allocation size reached ? */ + if (b == ((1 << alloc[0]) - 1)) + subband_status[max_ch][max_sb] = SB_NOMORE; + else + subband_status[max_ch][max_sb] = SB_ALLOCATED; + } else { + /* cannot increase the size of this subband */ + subband_status[max_ch][max_sb] = SB_NOMORE; + } + } + *padding = max_frame_size - current_frame_size; + assert(*padding >= 0); + +#if 0 + for(i=0;isblimit;i++) { + printf("%d ", bit_alloc[i]); + } + printf("\n"); +#endif +} + +/* + * Output the mpeg audio layer 2 frame. Note how the code is small + * compared to other encoders :-) + */ +static void encode_frame(MpegAudioContext *s, + unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT], + int padding) +{ + int i, j, k, l, bit_alloc_bits, b, ch; + unsigned char *sf; + int q[3]; + PutBitContext *p = &s->pb; + + /* header */ + + put_bits(p, 12, 0xfff); + put_bits(p, 1, 1 - s->lsf); /* 1 = mpeg1 ID, 0 = mpeg2 lsf ID */ + put_bits(p, 2, 4-2); /* layer 2 */ + put_bits(p, 1, 1); /* no error protection */ + put_bits(p, 4, s->bitrate_index); + put_bits(p, 2, s->freq_index); + put_bits(p, 1, s->do_padding); /* use padding */ + put_bits(p, 1, 0); /* private_bit */ + put_bits(p, 2, s->nb_channels == 2 ? 
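/*
 * Stripped-down, single-channel sketch of the greedy loop in
 * compute_bit_allocation() below: repeatedly give one more quantization step
 * to the subband with the highest remaining signal-to-mask ratio, subtract
 * the SNR gained, and close a band once a further step no longer fits the
 * frame.  N_BANDS, MAX_STEPS, step_bits and step_snr are illustrative
 * inputs, not the real allocation tables.
 */
#define N_BANDS   30
#define MAX_STEPS 15

static void greedy_alloc(int alloc[N_BANDS], int smr[N_BANDS],
                         const int step_bits[MAX_STEPS],
                         const int step_snr[MAX_STEPS], int budget)
{
    int closed[N_BANDS] = {0};
    int b, best;

    for (b = 0; b < N_BANDS; b++)
        alloc[b] = 0;
    for (;;) {
        best = -1;
        for (b = 0; b < N_BANDS; b++)           /* loudest still-open band */
            if (!closed[b] && (best < 0 || smr[b] > smr[best]))
                best = b;
        if (best < 0)
            break;                              /* every band is closed    */
        if (alloc[best] >= MAX_STEPS || step_bits[alloc[best]] > budget) {
            closed[best] = 1;                   /* no further increments   */
            continue;
        }
        budget -= step_bits[alloc[best]];
        smr[best] -= step_snr[alloc[best]];     /* resolution we just added */
        alloc[best]++;
    }
}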
MPA_STEREO : MPA_MONO); + put_bits(p, 2, 0); /* mode_ext */ + put_bits(p, 1, 0); /* no copyright */ + put_bits(p, 1, 1); /* original */ + put_bits(p, 2, 0); /* no emphasis */ + + /* bit allocation */ + j = 0; + for(i=0;isblimit;i++) { + bit_alloc_bits = s->alloc_table[j]; + for(ch=0;chnb_channels;ch++) { + put_bits(p, bit_alloc_bits, bit_alloc[ch][i]); + } + j += 1 << bit_alloc_bits; + } + + /* scale codes */ + for(i=0;isblimit;i++) { + for(ch=0;chnb_channels;ch++) { + if (bit_alloc[ch][i]) + put_bits(p, 2, s->scale_code[ch][i]); + } + } + + /* scale factors */ + for(i=0;isblimit;i++) { + for(ch=0;chnb_channels;ch++) { + if (bit_alloc[ch][i]) { + sf = &s->scale_factors[ch][i][0]; + switch(s->scale_code[ch][i]) { + case 0: + put_bits(p, 6, sf[0]); + put_bits(p, 6, sf[1]); + put_bits(p, 6, sf[2]); + break; + case 3: + case 1: + put_bits(p, 6, sf[0]); + put_bits(p, 6, sf[2]); + break; + case 2: + put_bits(p, 6, sf[0]); + break; + } + } + } + } + + /* quantization & write sub band samples */ + + for(k=0;k<3;k++) { + for(l=0;l<12;l+=3) { + j = 0; + for(i=0;isblimit;i++) { + bit_alloc_bits = s->alloc_table[j]; + for(ch=0;chnb_channels;ch++) { + b = bit_alloc[ch][i]; + if (b) { + int qindex, steps, m, sample, bits; + /* we encode 3 sub band samples of the same sub band at a time */ + qindex = s->alloc_table[j+b]; + steps = ff_mpa_quant_steps[qindex]; + for(m=0;m<3;m++) { + sample = s->sb_samples[ch][k][l + m][i]; + /* divide by scale factor */ +#ifdef USE_FLOATS + { + float a; + a = (float)sample * scale_factor_inv_table[s->scale_factors[ch][i][k]]; + q[m] = (int)((a + 1.0) * steps * 0.5); + } +#else + { + int q1, e, shift, mult; + e = s->scale_factors[ch][i][k]; + shift = scale_factor_shift[e]; + mult = scale_factor_mult[e]; + + /* normalize to P bits */ + if (shift < 0) + q1 = sample << (-shift); + else + q1 = sample >> shift; + q1 = (q1 * mult) >> P; + q[m] = ((q1 + (1 << P)) * steps) >> (P + 1); + } +#endif + if (q[m] >= steps) + q[m] = steps - 1; + assert(q[m] >= 0 && q[m] < steps); + } + bits = ff_mpa_quant_bits[qindex]; + if (bits < 0) { + /* group the 3 values to save bits */ + put_bits(p, -bits, + q[0] + steps * (q[1] + steps * q[2])); +#if 0 + printf("%d: gr1 %d\n", + i, q[0] + steps * (q[1] + steps * q[2])); +#endif + } else { +#if 0 + printf("%d: gr3 %d %d %d\n", + i, q[0], q[1], q[2]); +#endif + put_bits(p, bits, q[0]); + put_bits(p, bits, q[1]); + put_bits(p, bits, q[2]); + } + } + } + /* next subband in alloc table */ + j += 1 << bit_alloc_bits; + } + } + } + + /* padding */ + for(i=0;ipriv_data; + short *samples = data; + short smr[MPA_MAX_CHANNELS][SBLIMIT]; + unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT]; + int padding, i; + + for(i=0;inb_channels;i++) { + filter(s, i, samples + i, s->nb_channels); + } + + for(i=0;inb_channels;i++) { + compute_scale_factors(s->scale_code[i], s->scale_factors[i], + s->sb_samples[i], s->sblimit); + } + for(i=0;inb_channels;i++) { + psycho_acoustic_model(s, smr[i]); + } + compute_bit_allocation(s, smr, bit_alloc, &padding); + + init_put_bits(&s->pb, frame, MPA_MAX_CODED_FRAME_SIZE); + + encode_frame(s, bit_alloc, padding); + + s->nb_samples += MPA_FRAME_SIZE; + return pbBufPtr(&s->pb) - s->pb.buf; +} + +static av_cold int MPA_encode_close(AVCodecContext *avctx) +{ + av_freep(&avctx->coded_frame); + return 0; +} + +AVCodec mp2_encoder = { + "mp2", + CODEC_TYPE_AUDIO, + CODEC_ID_MP2, + sizeof(MpegAudioContext), + MPA_encode_init, + MPA_encode_frame, + MPA_encode_close, + NULL, + .sample_fmts = (enum 
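/*
 * Sketch of the 32-bit frame header that encode_frame() above emits field by
 * field with put_bits(), built here with plain shifts at the bit positions
 * that ff_mpegaudio_decode_header() reads back.  It assumes an MPEG-1 stream
 * with no CRC and leaves mode_ext, copyright, original and emphasis at zero;
 * it is a layout check, not the encoder's code path.
 */
#include <stdint.h>

static uint32_t build_mpa_header(int layer, int bitrate_index,
                                 int sample_rate_index, int padding, int mode)
{
    uint32_t h = 0;
    h |= 0xfffu << 20;                /* 12-bit sync (includes the non-2.5 bit) */
    h |= 1u << 19;                    /* ID bit: 1 = MPEG-1 (lsf = 0)           */
    h |= (uint32_t)(4 - layer) << 17; /* code 3 = layer 1, 2 = layer 2, 1 = layer 3 */
    h |= 1u << 16;                    /* protection bit: 1 = no CRC             */
    h |= (uint32_t)bitrate_index << 12;
    h |= (uint32_t)sample_rate_index << 10;
    h |= (uint32_t)padding << 9;
    h |= (uint32_t)mode << 6;         /* 0 stereo, 1 joint, 2 dual, 3 mono      */
    return h;
}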
SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE}, + .long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"), +}; + +#undef FIX diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiotab.h b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiotab.h index 59bf868932..61ea471b1c 100644 --- a/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiotab.h +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegaudiotab.h @@ -1,30 +1,49 @@ /* * mpeg audio layer 2 tables. Most of them come from the mpeg audio * specification. - * + * * Copyright (c) 2000, 2001 Fabrice Bellard. * - * The licence of this code is contained in file LICENCE found in the - * same archive + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file mpegaudiotab.h - * mpeg audio layer 2 tables. + * mpeg audio layer 2 tables. * Most of them come from the mpeg audio specification. */ - + +#ifndef FFMPEG_MPEGAUDIOTAB_H +#define FFMPEG_MPEGAUDIOTAB_H + +#include +#include "mpegaudio.h" + #define SQRT2 1.41421356237309514547 static const int costab32[30] = { FIX(0.54119610014619701222), FIX(1.3065629648763763537), - + FIX(0.50979557910415917998), FIX(2.5629154477415054814), FIX(0.89997622313641556513), FIX(0.60134488693504528634), - + FIX(0.5024192861881556782), FIX(5.1011486186891552563), FIX(0.78815462345125020249), @@ -33,7 +52,7 @@ static const int costab32[30] = { FIX(1.0606776859903470633), FIX(1.7224470982383341955), FIX(0.52249861493968885462), - + FIX(10.19000812354803287), FIX(0.674808341455005678), FIX(1.1694399334328846596), @@ -75,13 +94,13 @@ static unsigned char scale_diff_table[128]; static unsigned short total_quant_bits[17]; /* signal to noise ratio of each quantification step (could be - computed from quant_steps[]). The values are dB multiplied by 10 + computed from quant_steps[]). The values are dB multiplied by 10 */ -static unsigned short quant_snr[17] = { +static const unsigned short quant_snr[17] = { 70, 110, 160, 208, 253, 316, 378, 439, - 499, 559, 620, 680, - 740, 800, 861, 920, + 499, 559, 620, 680, + 740, 800, 861, 920, 980 }; @@ -96,3 +115,4 @@ static const float fixed_smr[SBLIMIT] = { static const unsigned char nb_scale_factors[4] = { 3, 2, 1, 2 }; +#endif /* FFMPEG_MPEGAUDIOTAB_H */ diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo.c index 7d1c5ba1ef..845bc39b83 100644 --- a/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo.c +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo.c @@ -3,71 +3,60 @@ * Copyright (c) 2000,2001 Fabrice Bellard. * Copyright (c) 2002-2004 Michael Niedermayer * - * This library is free software; you can redistribute it and/or + * 4MV & hq & B-frame encoding stuff by Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * 4MV & hq & b-frame encoding stuff by Michael Niedermayer + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ - + /** * @file mpegvideo.c * The simplest mpeg encoder (well, it was the simplest!). - */ - -#include + */ + #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" +#include "mpegvideo_common.h" +#include "mjpegenc.h" +#include "msmpeg4.h" #include "faandct.h" - -#ifdef USE_FASTMEMCPY -#include "fastmemcpy.h" -#endif +#include //#undef NDEBUG //#include -#ifdef CONFIG_ENCODERS -static void encode_picture(MpegEncContext *s, int picture_number); -#endif //CONFIG_ENCODERS -static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, +static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); -static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, +static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); +static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, + DCTELEM *block, int n, int qscale); static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); -static void dct_unquantize_h263_intra_c(MpegEncContext *s, +static void dct_unquantize_h263_intra_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); -static void dct_unquantize_h263_inter_c(MpegEncContext *s, +static void dct_unquantize_h263_inter_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); -static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w); -#ifdef CONFIG_ENCODERS -static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); -static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); -static int sse_mb(MpegEncContext *s); -static void denoise_dct_c(MpegEncContext *s, DCTELEM *block); -#endif //CONFIG_ENCODERS -#ifdef HAVE_XVMC extern int XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx); extern void XVMC_field_end(MpegEncContext *s); extern void XVMC_decode_mb(MpegEncContext *s); -#endif - -void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c; /* enable all paranoid tests for rounding, overflows, etc... 
*/ @@ -76,184 +65,80 @@ void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_e //#define DEBUG -/* for jpeg fast DCT */ -#define CONST_BITS 14 - -static const uint16_t aanscales[64] = { - /* precomputed values scaled up by 14 bits */ - 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, - 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270, - 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906, - 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315, - 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, - 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552, - 8867 , 12299, 11585, 10426, 8867, 6967, 4799, 2446, - 4520 , 6270, 5906, 5315, 4520, 3552, 2446, 1247 -}; - -static const uint8_t h263_chroma_roundtab[16] = { -// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, -}; - static const uint8_t ff_default_chroma_qscale_table[32]={ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 }; -#ifdef CONFIG_ENCODERS -static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL; -static uint8_t default_fcode_tab[MAX_MV*2+1]; +const uint8_t ff_mpeg1_dc_scale_table[128]={ +// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, +}; -enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1}; -static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64], - const uint16_t *quant_matrix, int bias, int qmin, int qmax) -{ - int qscale; - - for(qscale=qmin; qscale<=qmax; qscale++){ - int i; - if (dsp->fdct == ff_jpeg_fdct_islow -#ifdef FAAN_POSTSCALE - || dsp->fdct == ff_faandct -#endif - ) { - for(i=0;i<64;i++) { - const int j= dsp->idct_permutation[i]; - /* 16 <= qscale * quant_matrix[i] <= 7905 */ - /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */ - /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */ - /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */ - - qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / - (qscale * quant_matrix[j])); - } - } else if (dsp->fdct == fdct_ifast -#ifndef FAAN_POSTSCALE - || dsp->fdct == ff_faandct -#endif - ) { - for(i=0;i<64;i++) { - const int j= dsp->idct_permutation[i]; - /* 16 <= qscale * quant_matrix[i] <= 7905 */ - /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */ - /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */ - /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */ - - qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) / - (aanscales[i] * qscale * quant_matrix[j])); - } - } else { - for(i=0;i<64;i++) { - const int j= dsp->idct_permutation[i]; - /* We can safely suppose that 16 <= quant_matrix[i] <= 255 - So 16 <= qscale * quant_matrix[i] <= 7905 - so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905 - so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67 - */ - qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * 
quant_matrix[j])); -// qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]); - qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]); - - if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1; - qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]); - } - } - } -} - -static inline void update_qscale(MpegEncContext *s){ - s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7); - s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax); - - s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT; -} -#endif //CONFIG_ENCODERS - -void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){ - int i; - int end; - - st->scantable= src_scantable; - - for(i=0; i<64; i++){ - int j; - j = src_scantable[i]; - st->permutated[i] = permutation[j]; -#ifdef ARCH_POWERPC - st->inverse[j] = i; -#endif - } - - end=-1; - for(i=0; i<64; i++){ - int j; - j = st->permutated[i]; - if(j>end) end=j; - st->raster_end[i]= end; - } -} - -#ifdef CONFIG_ENCODERS -void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){ +const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){ int i; - if(matrix){ - put_bits(pb, 1, 1); - for(i=0;i<64;i++) { - put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]); + assert(p<=end); + if(p>=end) + return end; + + for(i=0; i<3; i++){ + uint32_t tmp= *state << 8; + *state= tmp + *(p++); + if(tmp == 0x100 || p==end) + return p; + } + + while(p 1 ) p+= 3; + else if(p[-2] ) p+= 2; + else if(p[-3]|(p[-1]-1)) p++; + else{ + p++; + break; } - }else - put_bits(pb, 1, 0); + } + + p= FFMIN(p, end)-4; + *state= AV_RB32(p); + + return p+4; } -#endif //CONFIG_ENCODERS /* init common dct for both encoder and decoder */ -int DCT_common_init(MpegEncContext *s) +int ff_dct_common_init(MpegEncContext *s) { s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c; s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c; s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c; s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c; s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c; + if(s->flags & CODEC_FLAG_BITEXACT) + s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact; s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c; -#ifdef CONFIG_ENCODERS - s->dct_quantize= dct_quantize_c; - s->denoise_dct= denoise_dct_c; -#endif - -#ifdef HAVE_MMX +#if defined(HAVE_MMX) MPV_common_init_mmx(s); -#endif -#ifdef ARCH_ALPHA +#elif defined(ARCH_ALPHA) MPV_common_init_axp(s); -#endif -#ifdef HAVE_MLIB +#elif defined(CONFIG_MLIB) MPV_common_init_mlib(s); -#endif -#ifdef HAVE_MMI +#elif defined(HAVE_MMI) MPV_common_init_mmi(s); -#endif -#ifdef ARCH_ARMV4L +#elif defined(ARCH_ARMV4L) MPV_common_init_armv4l(s); +#elif defined(HAVE_ALTIVEC) + MPV_common_init_altivec(s); +#elif defined(ARCH_BFIN) + MPV_common_init_bfin(s); #endif -#ifdef ARCH_POWERPC - MPV_common_init_ppc(s); -#endif - -#ifdef CONFIG_ENCODERS - s->fast_dct_quantize= s->dct_quantize; - - if(s->flags&CODEC_FLAG_TRELLIS_QUANT){ - s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_* - } - -#endif //CONFIG_ENCODERS /* load & permutate scantables - note: only wmv uses differnt ones + note: only wmv uses different ones */ if(s->alternate_scan){ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan); @@ -265,70 +150,58 @@ int 
DCT_common_init(MpegEncContext *s) ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); - s->picture_structure= PICT_FRAME; - return 0; } -static void copy_picture(Picture *dst, Picture *src){ +void copy_picture(Picture *dst, Picture *src){ *dst = *src; dst->type= FF_BUFFER_TYPE_COPY; } -static void copy_picture_attributes(AVFrame *dst, AVFrame *src){ - dst->pict_type = src->pict_type; - dst->quality = src->quality; - dst->coded_picture_number = src->coded_picture_number; - dst->display_picture_number = src->display_picture_number; -// dst->reference = src->reference; - dst->pts = src->pts; - dst->interlaced_frame = src->interlaced_frame; - dst->top_field_first = src->top_field_first; -} - /** * allocates a Picture * The pixels are allocated/set by calling get_buffer() if shared=0 */ -static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){ - const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11 +int alloc_picture(MpegEncContext *s, Picture *pic, int shared){ + const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11 const int mb_array_size= s->mb_stride*s->mb_height; const int b8_array_size= s->b8_stride*s->mb_height*2; const int b4_array_size= s->b4_stride*s->mb_height*4; int i; - + int r= -1; + if(shared){ assert(pic->data[0]); assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED); pic->type= FF_BUFFER_TYPE_SHARED; }else{ - int r; - assert(!pic->data[0]); - + r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic); - + if(r<0 || !pic->age || !pic->type || !pic->data[0]){ - av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]); + av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]); return -1; } if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n"); + s->avctx->release_buffer(s->avctx, (AVFrame*)pic); return -1; } if(pic->linesize[1] != pic->linesize[2]){ - av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride missmatch)\n"); + av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n"); + s->avctx->release_buffer(s->avctx, (AVFrame*)pic); return -1; } s->linesize = pic->linesize[0]; s->uvlinesize= pic->linesize[1]; } - + if(pic->qscale_table==NULL){ - if (s->encoding) { + if (s->encoding) { CHECKED_ALLOCZ(pic->mb_var , mb_array_size * sizeof(int16_t)) CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t)) CHECKED_ALLOCZ(pic->mb_mean , mb_array_size * sizeof(int8_t)) @@ -336,34 +209,41 @@ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){ CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t)) - CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num * sizeof(uint32_t)) - pic->mb_type= pic->mb_type_base + s->mb_stride+1; + CHECKED_ALLOCZ(pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t)) + pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1; if(s->out_format == FMT_H264){ for(i=0; i<2; i++){ - CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+1) * sizeof(int16_t)) - pic->motion_val[i]= pic->motion_val_base[i]+1; - 
CHECKED_ALLOCZ(pic->ref_index[i] , b8_array_size * sizeof(uint8_t)) + CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t)) + pic->motion_val[i]= pic->motion_val_base[i]+4; + CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t)) } pic->motion_subsample_log2= 2; }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){ for(i=0; i<2; i++){ - CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+1) * sizeof(int16_t)*2) //FIXME - pic->motion_val[i]= pic->motion_val_base[i]+1; + CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t)) + pic->motion_val[i]= pic->motion_val_base[i]+4; + CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t)) } pic->motion_subsample_log2= 3; } + if(s->avctx->debug&FF_DEBUG_DCT_COEFF) { + CHECKED_ALLOCZ(pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6) + } pic->qstride= s->mb_stride; CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan)) } - //it might be nicer if the application would keep track of these but it would require a API change + /* It might be nicer if the application would keep track of these + * but it would require an API change. */ memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1); - s->prev_pict_types[0]= s->pict_type; - if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE) - pic->age= INT_MAX; // skiped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway - + s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type; + if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE) + pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway. 
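The allocations just above follow a recurring pattern: the buffer is over-allocated by a guard border (mb_type_base gains extra stride elements, motion_val_base a +4 pad) and the working pointer is then set past that border, so neighbour lookups such as "the block above-left" never read before the start of the buffer. A minimal standalone sketch of the idea, using hypothetical names rather than the real Picture fields:

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the bordered-allocation pattern: a w x h grid of ints stored with
 * one guard row and one guard column, so that the above-left neighbour of any
 * cell, grid[x + y*stride - stride - 1], is still inside the allocation. */
typedef struct {
    int *base;    /* owns the memory, including the guard border        */
    int *grid;    /* element (0,0); neighbour reads may go before this  */
    int  stride;  /* w + 1, like mb_stride = mb_width + 1 above         */
} BorderedGrid;

static int grid_init(BorderedGrid *g, int w, int h)
{
    g->stride = w + 1;
    g->base   = calloc((size_t)g->stride * (h + 1) + 1, sizeof(*g->base));
    if (!g->base)
        return -1;
    g->grid = g->base + g->stride + 1;   /* skip guard row and guard column */
    return 0;
}

static void grid_free(BorderedGrid *g)
{
    free(g->base);
    g->base = g->grid = NULL;
}

int main(void)
{
    BorderedGrid g;
    if (grid_init(&g, 4, 3))
        return 1;
    g.grid[0] = 7;                                             /* cell (0,0) */
    printf("above-left of (0,0) = %d\n", g.grid[-g.stride - 1]); /* guard: 0 */
    grid_free(&g);
    return 0;
}

calloc() keeps the guard cells zeroed, so out-of-picture neighbours read back as a harmless default value.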
+ return 0; fail: //for the CHECKED_ALLOCZ macro + if(r>=0) + s->avctx->release_buffer(s->avctx, (AVFrame*)pic); return -1; } @@ -383,80 +263,198 @@ static void free_picture(MpegEncContext *s, Picture *pic){ av_freep(&pic->mbskip_table); av_freep(&pic->qscale_table); av_freep(&pic->mb_type_base); + av_freep(&pic->dct_coeff); av_freep(&pic->pan_scan); pic->mb_type= NULL; for(i=0; i<2; i++){ av_freep(&pic->motion_val_base[i]); av_freep(&pic->ref_index[i]); } - + if(pic->type == FF_BUFFER_TYPE_SHARED){ for(i=0; i<4; i++){ pic->base[i]= pic->data[i]= NULL; } - pic->type= 0; + pic->type= 0; } } -/* init common structure for both encoder and decoder */ +static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){ + int i; + + // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264) + CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*21*2); //(width + edge + align)*interlaced*MBsize*tolerance + s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21; + + //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer() + CHECKED_ALLOCZ(s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t)) + s->rd_scratchpad= s->me.scratchpad; + s->b_scratchpad= s->me.scratchpad; + s->obmc_scratchpad= s->me.scratchpad + 16; + if (s->encoding) { + CHECKED_ALLOCZ(s->me.map , ME_MAP_SIZE*sizeof(uint32_t)) + CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t)) + if(s->avctx->noise_reduction){ + CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int)) + } + } + CHECKED_ALLOCZ(s->blocks, 64*12*2 * sizeof(DCTELEM)) + s->block= s->blocks[0]; + + for(i=0;i<12;i++){ + s->pblocks[i] = (short *)(&s->block[i]); + } + return 0; +fail: + return -1; //free() through MPV_common_end() +} + +static void free_duplicate_context(MpegEncContext *s){ + if(s==NULL) return; + + av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL; + av_freep(&s->me.scratchpad); + s->rd_scratchpad= + s->b_scratchpad= + s->obmc_scratchpad= NULL; + + av_freep(&s->dct_error_sum); + av_freep(&s->me.map); + av_freep(&s->me.score_map); + av_freep(&s->blocks); + s->block= NULL; +} + +static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){ +#define COPY(a) bak->a= src->a + COPY(allocated_edge_emu_buffer); + COPY(edge_emu_buffer); + COPY(me.scratchpad); + COPY(rd_scratchpad); + COPY(b_scratchpad); + COPY(obmc_scratchpad); + COPY(me.map); + COPY(me.score_map); + COPY(blocks); + COPY(block); + COPY(start_mb_y); + COPY(end_mb_y); + COPY(me.map_generation); + COPY(pb); + COPY(dct_error_sum); + COPY(dct_count[0]); + COPY(dct_count[1]); +#undef COPY +} + +void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){ + MpegEncContext bak; + int i; + //FIXME copy only needed parts +//START_TIMER + backup_duplicate_context(&bak, dst); + memcpy(dst, src, sizeof(MpegEncContext)); + backup_duplicate_context(dst, &bak); + for(i=0;i<12;i++){ + dst->pblocks[i] = (short *)(&dst->block[i]); + } +//STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads +} + +/** + * sets the given MpegEncContext to common defaults (same for encoding and decoding). + * the changed fields will not depend upon the prior state of the MpegEncContext. 
+ */ +void MPV_common_defaults(MpegEncContext *s){ + s->y_dc_scale_table= + s->c_dc_scale_table= ff_mpeg1_dc_scale_table; + s->chroma_qscale_table= ff_default_chroma_qscale_table; + s->progressive_frame= 1; + s->progressive_sequence= 1; + s->picture_structure= PICT_FRAME; + + s->coded_picture_number = 0; + s->picture_number = 0; + s->input_picture_number = 0; + + s->picture_in_gop_number = 0; + + s->f_code = 1; + s->b_code = 1; +} + +/** + * sets the given MpegEncContext to defaults for decoding. + * the changed fields will not depend upon the prior state of the MpegEncContext. + */ +void MPV_decode_defaults(MpegEncContext *s){ + MPV_common_defaults(s); +} + +/** + * init common structure for both encoder and decoder. + * this assumes that some variables like width/height are already set + */ int MPV_common_init(MpegEncContext *s) { - int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y; + int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads; + + s->mb_height = (s->height + 15) / 16; + + if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){ + av_log(s->avctx, AV_LOG_ERROR, "too many threads\n"); + return -1; + } + + if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height)) + return -1; dsputil_init(&s->dsp, s->avctx); - DCT_common_init(s); + ff_dct_common_init(s); s->flags= s->avctx->flags; s->flags2= s->avctx->flags2; s->mb_width = (s->width + 15) / 16; - s->mb_height = (s->height + 15) / 16; s->mb_stride = s->mb_width + 1; s->b8_stride = s->mb_width*2 + 1; s->b4_stride = s->mb_width*4 + 1; mb_array_size= s->mb_height * s->mb_stride; mv_table_size= (s->mb_height+2) * s->mb_stride + 1; + /* set chroma shifts */ + avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift), + &(s->chroma_y_shift) ); + /* set default edge pos, will be overriden in decode_header if needed */ s->h_edge_pos= s->mb_width*16; s->v_edge_pos= s->mb_height*16; s->mb_num = s->mb_width * s->mb_height; - + s->block_wrap[0]= s->block_wrap[1]= s->block_wrap[2]= - s->block_wrap[3]= s->mb_width*2 + 2; + s->block_wrap[3]= s->b8_stride; s->block_wrap[4]= - s->block_wrap[5]= s->mb_width + 2; + s->block_wrap[5]= s->mb_stride; - s->y_dc_scale_table= - s->c_dc_scale_table= ff_mpeg1_dc_scale_table; - s->chroma_qscale_table= ff_default_chroma_qscale_table; - if (!s->encoding) - s->progressive_sequence= 1; - s->progressive_frame= 1; - s->coded_picture_number = 0; - - y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2); - c_size = (s->mb_width + 2) * (s->mb_height + 2); + y_size = s->b8_stride * (2 * s->mb_height + 1); + c_size = s->mb_stride * (s->mb_height + 1); yc_size = y_size + 2 * c_size; /* convert fourcc to upper case */ - s->avctx->codec_tag= toupper( s->avctx->codec_tag &0xFF) + s->codec_tag= toupper( s->avctx->codec_tag &0xFF) + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 ) - + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16) + + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16) + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24); - s->avctx->stream_codec_tag= toupper( s->avctx->stream_codec_tag &0xFF) + s->stream_codec_tag= toupper( s->avctx->stream_codec_tag &0xFF) + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 ) - + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16) + + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16) + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24); - CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance 
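Just above, MPV_common_init normalises the packed four-character codec tags by upper-casing each byte with toupper(). The same operation in isolation (the helper name is invented for this sketch):

#include <ctype.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper showing the byte-wise upper-casing of a packed fourcc:
 * 'divx' (0x78766964 when packed little-endian) becomes 'DIVX'. */
static uint32_t upcase_fourcc(uint32_t tag)
{
    return  (uint32_t)toupper( tag        & 0xFF)
         | ((uint32_t)toupper((tag >>  8) & 0xFF) <<  8)
         | ((uint32_t)toupper((tag >> 16) & 0xFF) << 16)
         | ((uint32_t)toupper((tag >> 24) & 0xFF) << 24);
}

int main(void)
{
    uint32_t tag = 'd' | ('i' << 8) | ('v' << 16) | ('x' << 24);
    printf("0x%08" PRIx32 " -> 0x%08" PRIx32 "\n", tag, upcase_fourcc(tag));
    return 0;
}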
- s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17; - s->avctx->coded_frame= (AVFrame*)&s->current_picture; CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error ressilience code looks cleaner with this @@ -466,7 +464,7 @@ int MPV_common_init(MpegEncContext *s) } } s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed? - + if (s->encoding) { /* Allocate MV tables */ CHECKED_ALLOCZ(s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t)) @@ -482,17 +480,6 @@ int MPV_common_init(MpegEncContext *s) s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1; s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1; - //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer() - CHECKED_ALLOCZ(s->me.scratchpad, s->width*2*16*3*sizeof(uint8_t)) - - CHECKED_ALLOCZ(s->me.map , ME_MAP_SIZE*sizeof(uint32_t)) - CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t)) - - if(s->codec_id==CODEC_ID_MPEG4){ - CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE); - CHECKED_ALLOCZ( s->pb2_buffer, PB_BUFFER_SIZE); - } - if(s->msmpeg4_version){ CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int)); } @@ -500,27 +487,24 @@ int MPV_common_init(MpegEncContext *s) /* Allocate MB type table */ CHECKED_ALLOCZ(s->mb_type , mb_array_size * sizeof(uint16_t)) //needed for encoding - + CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int)) - + CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int)) CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int)) CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t)) CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t)) CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*)) CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*)) - + if(s->avctx->noise_reduction){ - CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int)) CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t)) } } - CHECKED_ALLOCZ(s->blocks, 64*6*2 * sizeof(DCTELEM)) - CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture)) CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t)) - + if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){ /* interlaced direct mode decoding tables */ for(i=0; i<2; i++){ @@ -539,71 +523,82 @@ int MPV_common_init(MpegEncContext *s) } if (s->out_format == FMT_H263) { /* ac values */ - CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16); - s->ac_val[1] = s->ac_val[0] + y_size; + CHECKED_ALLOCZ(s->ac_val_base, yc_size * sizeof(int16_t) * 16); + s->ac_val[0] = s->ac_val_base + s->b8_stride + 1; + s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1; s->ac_val[2] = s->ac_val[1] + c_size; - + /* cbp values */ - CHECKED_ALLOCZ(s->coded_block, y_size); - - /* divx501 bitstream reorder buffer */ - CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE); + CHECKED_ALLOCZ(s->coded_block_base, y_size); + s->coded_block= s->coded_block_base + s->b8_stride + 1; /* cbp, ac_pred, pred_dir */ CHECKED_ALLOCZ(s->cbp_table , mb_array_size * sizeof(uint8_t)) CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t)) } - + if (s->h263_pred || s->h263_plus || !s->encoding) { /* dc values */ //MN: we need these for error resilience of intra-frames - CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(int16_t)); - s->dc_val[1] = s->dc_val[0] + y_size; + CHECKED_ALLOCZ(s->dc_val_base, yc_size * sizeof(int16_t)); + 
s->dc_val[0] = s->dc_val_base + s->b8_stride + 1; + s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1; s->dc_val[2] = s->dc_val[1] + c_size; for(i=0;idc_val[0][i] = 1024; + s->dc_val_base[i] = 1024; } /* which mb is a intra block */ CHECKED_ALLOCZ(s->mbintra_table, mb_array_size); memset(s->mbintra_table, 1, mb_array_size); - - /* default structure is frame */ - s->picture_structure = PICT_FRAME; - + /* init macroblock skip table */ CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2); //Note the +1 is for a quicker mpeg4 slice_end detection CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE); - - s->block= s->blocks[0]; - - for(i=0;i<12;i++){ - s->pblocks[i] = (short *)(&s->block[i]); - } s->parse_context.state= -1; if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){ s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH); - s->visualization_buffer[1] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH); - s->visualization_buffer[2] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH); + s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH); + s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH); } s->context_initialized = 1; + + s->thread_context[0]= s; + threads = s->avctx->thread_count; + + for(i=1; ithread_context[i]= av_malloc(sizeof(MpegEncContext)); + memcpy(s->thread_context[i], s, sizeof(MpegEncContext)); + } + + for(i=0; ithread_context[i], s) < 0) + goto fail; + s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count; + s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count; + } + return 0; fail: MPV_common_end(s); return -1; } - -//extern int sads; - /* init common structure for both encoder and decoder */ void MPV_common_end(MpegEncContext *s) { int i, j, k; + for(i=0; iavctx->thread_count; i++){ + free_duplicate_context(s->thread_context[i]); + } + for(i=1; iavctx->thread_count; i++){ + av_freep(&s->thread_context[i]); + } + av_freep(&s->parse_context.buffer); s->parse_context.buffer_size=0; @@ -632,23 +627,19 @@ void MPV_common_end(MpegEncContext *s) } av_freep(&s->p_field_select_table[i]); } - - av_freep(&s->dc_val[0]); - av_freep(&s->ac_val[0]); - av_freep(&s->coded_block); + + av_freep(&s->dc_val_base); + av_freep(&s->ac_val_base); + av_freep(&s->coded_block_base); av_freep(&s->mbintra_table); av_freep(&s->cbp_table); av_freep(&s->pred_dir_table); - av_freep(&s->me.scratchpad); - av_freep(&s->me.map); - av_freep(&s->me.score_map); - + av_freep(&s->mbskip_table); av_freep(&s->prev_pict_types); av_freep(&s->bitstream_buffer); - av_freep(&s->tex_pb_buffer); - av_freep(&s->pb2_buffer); - av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL; + s->allocated_bitstream_buffer_size=0; + av_freep(&s->avctx->stats_out); av_freep(&s->ac_stats); av_freep(&s->error_status_table); @@ -658,10 +649,8 @@ void MPV_common_end(MpegEncContext *s) av_freep(&s->q_inter_matrix); av_freep(&s->q_intra_matrix16); av_freep(&s->q_inter_matrix16); - av_freep(&s->blocks); av_freep(&s->input_picture); av_freep(&s->reordered_input_picture); - av_freep(&s->dct_error_sum); av_freep(&s->dct_offset); if(s->picture){ @@ -670,411 +659,28 @@ void MPV_common_end(MpegEncContext *s) } } av_freep(&s->picture); - avcodec_default_free_buffers(s->avctx); 
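Earlier in this hunk, MPV_common_init assigns each worker thread a contiguous band of macroblock rows through start_mb_y and end_mb_y. A standalone check of that rounding formula (the picture height and thread count are example values only):

#include <stdio.h>

/* The same slice formula as the thread_context setup above: thread i gets
 * macroblock rows [start, end).  Consecutive slices meet exactly, so every
 * row is handled by one and only one thread. */
int main(void)
{
    const int mb_height = 45;   /* e.g. a 720-pixel-high picture: 720/16 */
    const int threads   = 4;    /* example thread count */

    for (int i = 0; i < threads; i++) {
        int start = (mb_height *  i      + threads / 2) / threads;
        int end   = (mb_height * (i + 1) + threads / 2) / threads;
        printf("thread %d: mb rows %d..%d\n", i, start, end - 1);
    }
    return 0;
}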
s->context_initialized = 0; s->last_picture_ptr= s->next_picture_ptr= s->current_picture_ptr= NULL; + s->linesize= s->uvlinesize= 0; + for(i=0; i<3; i++) - if (s->visualization_buffer[i]) - av_free(s->visualization_buffer[i]); + av_freep(&s->visualization_buffer[i]); + + avcodec_default_free_buffers(s->avctx); } -#ifdef CONFIG_ENCODERS - -/* init video encoder */ -int MPV_encode_init(AVCodecContext *avctx) -{ - MpegEncContext *s = avctx->priv_data; - int i, dummy; - int chroma_h_shift, chroma_v_shift; - - avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME - - s->bit_rate = avctx->bit_rate; - s->width = avctx->width; - s->height = avctx->height; - if(avctx->gop_size > 600){ - av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...\n"); - avctx->gop_size=600; - } - s->gop_size = avctx->gop_size; - s->avctx = avctx; - s->flags= avctx->flags; - s->flags2= avctx->flags2; - s->max_b_frames= avctx->max_b_frames; - s->codec_id= avctx->codec->id; - s->luma_elim_threshold = avctx->luma_elim_threshold; - s->chroma_elim_threshold= avctx->chroma_elim_threshold; - s->strict_std_compliance= avctx->strict_std_compliance; - s->data_partitioning= avctx->flags & CODEC_FLAG_PART; - s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0; - s->mpeg_quant= avctx->mpeg_quant; - s->rtp_mode= !!avctx->rtp_payload_size; - - if (s->gop_size <= 1) { - s->intra_only = 1; - s->gop_size = 12; - } else { - s->intra_only = 0; - } - - s->me_method = avctx->me_method; - - /* Fixed QSCALE */ - s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE); - - s->adaptive_quant= ( s->avctx->lumi_masking - || s->avctx->dark_masking - || s->avctx->temporal_cplx_masking - || s->avctx->spatial_cplx_masking - || s->avctx->p_masking - || (s->flags&CODEC_FLAG_QP_RD)) - && !s->fixed_qscale; - - s->obmc= !!(s->flags & CODEC_FLAG_OBMC); - s->loop_filter= !!(s->flags & CODEC_FLAG_LOOP_FILTER); - s->alternate_scan= !!(s->flags & CODEC_FLAG_ALT_SCAN); - - if(avctx->rc_max_rate && !avctx->rc_buffer_size){ - av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n"); - return -1; - } - - if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){ - av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n"); - } - - if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4 - && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){ - av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n"); - return -1; - } - - if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){ - av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decission\n"); - return -1; - } - - if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){ - av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n"); - return -1; - } - - if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){ - av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n"); - return -1; - } - - if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){ - av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n"); - return -1; - } - - if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){ - av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n"); - return -1; - } - - if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too - av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supporetd by codec\n"); - return -1; - } - - 
if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){ - av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n"); - return -1; - } - - if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){ - av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n"); - return -1; - } - - if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){ - av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection arent supported yet\n"); - return -1; - } - - i= ff_gcd(avctx->frame_rate, avctx->frame_rate_base); - if(i > 1){ - av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n"); - avctx->frame_rate /= i; - avctx->frame_rate_base /= i; -// return -1; - } - - if(s->codec_id==CODEC_ID_MJPEG){ - s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x - s->inter_quant_bias= 0; - }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){ - s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x - s->inter_quant_bias= 0; - }else{ - s->intra_quant_bias=0; - s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x - } - - if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS) - s->intra_quant_bias= avctx->intra_quant_bias; - if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS) - s->inter_quant_bias= avctx->inter_quant_bias; - - avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift); - - av_reduce(&s->time_increment_resolution, &dummy, s->avctx->frame_rate, s->avctx->frame_rate_base, (1<<16)-1); - s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1; - - switch(avctx->codec->id) { - case CODEC_ID_MPEG1VIDEO: - s->out_format = FMT_MPEG1; - s->low_delay= 0; //s->max_b_frames ? 0 : 1; - avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1); - break; - case CODEC_ID_MPEG2VIDEO: - s->out_format = FMT_MPEG1; - s->low_delay= 0; //s->max_b_frames ? 0 : 1; - avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1); - s->rtp_mode= 1; - break; - case CODEC_ID_LJPEG: - case CODEC_ID_MJPEG: - s->out_format = FMT_MJPEG; - s->intra_only = 1; /* force intra only for jpeg */ - s->mjpeg_write_tables = 1; /* write all tables */ - s->mjpeg_data_only_frames = 0; /* write all the needed headers */ - s->mjpeg_vsample[0] = 1<mjpeg_vsample[1] = 1; - s->mjpeg_vsample[2] = 1; - s->mjpeg_hsample[0] = 1<mjpeg_hsample[1] = 1; - s->mjpeg_hsample[2] = 1; - if (mjpeg_init(s) < 0) - return -1; - avctx->delay=0; - s->low_delay=1; - break; -#ifdef CONFIG_RISKY - case CODEC_ID_H263: - if (h263_get_picture_format(s->width, s->height) == 7) { - av_log(avctx, AV_LOG_INFO, "Input picture size isn't suitable for h263 codec! try h263+\n"); - return -1; - } - s->out_format = FMT_H263; - s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0; - avctx->delay=0; - s->low_delay=1; - break; - case CODEC_ID_H263P: - s->out_format = FMT_H263; - s->h263_plus = 1; - /* Fx */ - s->umvplus = (avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0; - s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0; - s->modified_quant= s->h263_aic; - s->alt_inter_vlc= (avctx->flags & CODEC_FLAG_H263P_AIV) ? 1:0; - s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0; - s->loop_filter= (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1:0; - s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus; - s->h263_slice_structured= (s->flags & CODEC_FLAG_H263P_SLICE_STRUCT) ? 
1:0; - - /* /Fx */ - /* These are just to be sure */ - avctx->delay=0; - s->low_delay=1; - break; - case CODEC_ID_FLV1: - s->out_format = FMT_H263; - s->h263_flv = 2; /* format = 1; 11-bit codes */ - s->unrestricted_mv = 1; - s->rtp_mode=0; /* don't allow GOB */ - avctx->delay=0; - s->low_delay=1; - break; - case CODEC_ID_RV10: - s->out_format = FMT_H263; - avctx->delay=0; - s->low_delay=1; - break; - case CODEC_ID_MPEG4: - s->out_format = FMT_H263; - s->h263_pred = 1; - s->unrestricted_mv = 1; - s->low_delay= s->max_b_frames ? 0 : 1; - avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1); - break; - case CODEC_ID_MSMPEG4V1: - s->out_format = FMT_H263; - s->h263_msmpeg4 = 1; - s->h263_pred = 1; - s->unrestricted_mv = 1; - s->msmpeg4_version= 1; - avctx->delay=0; - s->low_delay=1; - break; - case CODEC_ID_MSMPEG4V2: - s->out_format = FMT_H263; - s->h263_msmpeg4 = 1; - s->h263_pred = 1; - s->unrestricted_mv = 1; - s->msmpeg4_version= 2; - avctx->delay=0; - s->low_delay=1; - break; - case CODEC_ID_MSMPEG4V3: - s->out_format = FMT_H263; - s->h263_msmpeg4 = 1; - s->h263_pred = 1; - s->unrestricted_mv = 1; - s->msmpeg4_version= 3; - s->flipflop_rounding=1; - avctx->delay=0; - s->low_delay=1; - break; - case CODEC_ID_WMV1: - s->out_format = FMT_H263; - s->h263_msmpeg4 = 1; - s->h263_pred = 1; - s->unrestricted_mv = 1; - s->msmpeg4_version= 4; - s->flipflop_rounding=1; - avctx->delay=0; - s->low_delay=1; - break; - case CODEC_ID_WMV2: - s->out_format = FMT_H263; - s->h263_msmpeg4 = 1; - s->h263_pred = 1; - s->unrestricted_mv = 1; - s->msmpeg4_version= 5; - s->flipflop_rounding=1; - avctx->delay=0; - s->low_delay=1; - break; -#endif - default: - return -1; - } - - { /* set up some save defaults, some codecs might override them later */ - static int done=0; - if(!done){ - int i; - done=1; - - default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) ); - memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1)); - memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1)); - - for(i=-16; i<16; i++){ - default_fcode_tab[i + MAX_MV]= 1; - } - } - } - s->me.mv_penalty= default_mv_penalty; - s->fcode_tab= default_fcode_tab; - - /* dont use mv_penalty table for crap MV as it would be confused */ - //FIXME remove after fixing / removing old ME - if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty; - - s->encoding = 1; - - /* init */ - if (MPV_common_init(s) < 0) - return -1; - - if(s->modified_quant) - s->chroma_qscale_table= ff_h263_chroma_qscale_table; - s->progressive_frame= - s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)); - - ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp); - - ff_init_me(s); - -#ifdef CONFIG_ENCODERS -#ifdef CONFIG_RISKY - if (s->out_format == FMT_H263) - h263_encode_init(s); - if(s->msmpeg4_version) - ff_msmpeg4_encode_init(s); -#endif - if (s->out_format == FMT_MPEG1) - ff_mpeg1_encode_init(s); -#endif - - /* init default q matrix */ - for(i=0;i<64;i++) { - int j= s->dsp.idct_permutation[i]; -#ifdef CONFIG_RISKY - if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){ - s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i]; - s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i]; - }else if(s->out_format == FMT_H263){ - s->intra_matrix[j] = - s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i]; - }else -#endif - { /* mpeg1/2 */ - s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i]; - s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i]; - } - 
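Both the deleted convert_matrix() earlier in this file and its calls in the old MPV_encode_init precompute (1 << QMAT_SHIFT) / (qscale * quant_matrix[i]) so that quantising a coefficient becomes a multiply plus a shift instead of a division. The idea in isolation, with a shift value and sample numbers chosen only for this sketch:

#include <stdint.h>
#include <stdio.h>

#define QMAT_SHIFT 22   /* precision picked for this example only */

/* Replace coef / (qscale * quant) by a multiply with a precomputed
 * fixed-point reciprocal, as the deleted convert_matrix() does. */
int main(void)
{
    const int     qscale = 8, quant = 16;   /* example quantiser / matrix entry */
    const int32_t recip  = (int32_t)((1LL << QMAT_SHIFT) / (qscale * quant));

    const int coef        = 3000;           /* example DCT coefficient */
    int       by_division = coef / (qscale * quant);
    int       by_multiply = (int)(((int64_t)coef * recip) >> QMAT_SHIFT);

    printf("division: %d  reciprocal multiply: %d\n", by_division, by_multiply);
    return 0;
}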
if(s->avctx->intra_matrix) - s->intra_matrix[j] = s->avctx->intra_matrix[i]; - if(s->avctx->inter_matrix) - s->inter_matrix[j] = s->avctx->inter_matrix[i]; - } - - /* precompute matrix */ - /* for mjpeg, we do include qscale in the matrix */ - if (s->out_format != FMT_MJPEG) { - convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16, - s->intra_matrix, s->intra_quant_bias, 1, 31); - convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16, - s->inter_matrix, s->inter_quant_bias, 1, 31); - } - - if(ff_rate_control_init(s) < 0) - return -1; - - s->picture_number = 0; - s->input_picture_number = 0; - s->picture_in_gop_number = 0; - /* motion detector init */ - s->f_code = 1; - s->b_code = 1; - - return 0; -} - -int MPV_encode_end(AVCodecContext *avctx) -{ - MpegEncContext *s = avctx->priv_data; - -#ifdef STATS - print_stats(); -#endif - - ff_rate_control_uninit(s); - - MPV_common_end(s); - if (s->out_format == FMT_MJPEG) - mjpeg_close(s); - - av_freep(&avctx->extradata); - - return 0; -} - -#endif //CONFIG_ENCODERS - -void init_rl(RLTable *rl) +void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3]) { int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1]; uint8_t index_run[MAX_RUN+1]; int last, run, level, start, end, i; + /* If table is static, we can quit if rl->max_level[0] is not NULL */ + if(static_store && rl->max_level[0]) + return; + /* compute max_level[], max_run[] and index_run[] */ for(last=0;last<2;last++) { if (last == 0) { @@ -1098,47 +704,67 @@ void init_rl(RLTable *rl) if (run > max_run[level]) max_run[level] = run; } - rl->max_level[last] = av_malloc(MAX_RUN + 1); + if(static_store) + rl->max_level[last] = static_store[last]; + else + rl->max_level[last] = av_malloc(MAX_RUN + 1); memcpy(rl->max_level[last], max_level, MAX_RUN + 1); - rl->max_run[last] = av_malloc(MAX_LEVEL + 1); + if(static_store) + rl->max_run[last] = static_store[last] + MAX_RUN + 1; + else + rl->max_run[last] = av_malloc(MAX_LEVEL + 1); memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1); - rl->index_run[last] = av_malloc(MAX_RUN + 1); + if(static_store) + rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2; + else + rl->index_run[last] = av_malloc(MAX_RUN + 1); memcpy(rl->index_run[last], index_run, MAX_RUN + 1); } } -/* draw the edges of width 'w' of an image of size width, height */ -//FIXME check that this is ok for mpeg4 interlaced -static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w) +void init_vlc_rl(RLTable *rl) { - uint8_t *ptr, *last_line; - int i; + int i, q; - last_line = buf + (height - 1) * wrap; - for(i=0;ivlc.table_size; i++){ + int code= rl->vlc.table[i][0]; + int len = rl->vlc.table[i][1]; + int level, run; + + if(len==0){ // illegal code + run= 66; + level= MAX_LEVEL; + }else if(len<0){ //more bits needed + run= 0; + level= code; + }else{ + if(code==rl->n){ //esc + run= 66; + level= 0; + }else{ + run= rl->table_run [code] + 1; + level= rl->table_level[code] * qmul + qadd; + if(code >= rl->last) run+=192; + } + } + rl->rl_vlc[q][i].len= len; + rl->rl_vlc[q][i].level= level; + rl->rl_vlc[q][i].run= run; + } } } int ff_find_unused_picture(MpegEncContext *s, int shared){ int i; - + if(shared){ for(i=0; ipicture[i].data[0]==NULL && s->picture[i].type==0) return i; @@ -1152,7 +778,19 @@ int ff_find_unused_picture(MpegEncContext *s, int shared){ } } - assert(0); + av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n"); + /* We could return -1, but the codec would crash trying to draw into a + * 
non-existing frame anyway. This is safer than waiting for a random crash. + * Also the return of this is never useful, an encoder must only allocate + * as much as allowed in the specification. This has no relationship to how + * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large + * enough for such valid streams). + * Plus, a decoder has to check stream validity and remove frames if too + * many reference frames are around. Waiting for "OOM" is not correct at + * all. Similarly, missing reference frames have to be replaced by + * interpolated/MC frames, anything else is a bug in the codec ... + */ + abort(); return -1; } @@ -1166,7 +804,7 @@ static void update_noise_reduction(MpegEncContext *s){ } s->dct_count[intra] >>= 1; } - + for(i=0; i<64; i++){ s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1); } @@ -1180,12 +818,13 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) { int i; AVFrame *pic; - s->mb_skiped = 0; + s->mb_skipped = 0; assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3); /* mark&release old frames */ - if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr->data[0]) { + if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) { + if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){ avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr); /* release forgotten pictures */ @@ -1194,14 +833,15 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) for(i=0; ipicture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){ av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n"); - avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]); + avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]); } } } + } } alloc: if(!s->encoding){ - /* release non refernce frames */ + /* release non reference frames */ for(i=0; ipicture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){ s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]); @@ -1209,16 +849,22 @@ alloc: } if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL) - pic= (AVFrame*)s->current_picture_ptr; //we allready have a unused image (maybe it was set before reading the header) + pic= (AVFrame*)s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header) else{ i= ff_find_unused_picture(s, 0); pic= (AVFrame*)&s->picture[i]; } - pic->reference= s->pict_type != B_TYPE ? 
3 : 0; + pic->reference= 0; + if (!s->dropable){ + if (s->codec_id == CODEC_ID_H264) + pic->reference = s->picture_structure; + else if (s->pict_type != FF_B_TYPE) + pic->reference = 3; + } pic->coded_picture_number= s->coded_picture_number++; - + if( alloc_picture(s, (Picture*)pic, 0) < 0) return -1; @@ -1228,51 +874,55 @@ alloc: } s->current_picture_ptr->pict_type= s->pict_type; -// if(s->flags && CODEC_FLAG_QSCALE) +// if(s->flags && CODEC_FLAG_QSCALE) // s->current_picture_ptr->quality= s->new_picture_ptr->quality; - s->current_picture_ptr->key_frame= s->pict_type == I_TYPE; + s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE; copy_picture(&s->current_picture, s->current_picture_ptr); - - if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){ - if (s->pict_type != B_TYPE) { + + if (s->pict_type != FF_B_TYPE) { s->last_picture_ptr= s->next_picture_ptr; - s->next_picture_ptr= s->current_picture_ptr; + if(!s->dropable) + s->next_picture_ptr= s->current_picture_ptr; } - +/* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr, + s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL, + s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL, + s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL, + s->pict_type, s->dropable);*/ + if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr); if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr); - - if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL)){ + + if(s->pict_type != FF_I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && !s->dropable){ av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n"); - assert(s->pict_type != B_TYPE); //these should have been dropped if we dont have a reference + assert(s->pict_type != FF_B_TYPE); //these should have been dropped if we don't have a reference goto alloc; } - assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0])); + assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0])); - if(s->picture_structure!=PICT_FRAME){ + if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){ int i; for(i=0; i<4; i++){ if(s->picture_structure == PICT_BOTTOM_FIELD){ s->current_picture.data[i] += s->current_picture.linesize[i]; - } + } s->current_picture.linesize[i] *= 2; s->last_picture.linesize[i] *=2; s->next_picture.linesize[i] *=2; } } - } - + s->hurry_up= s->avctx->hurry_up; s->error_resilience= avctx->error_resilience; - /* set dequantizer, we cant do it during init as it might change for mpeg4 - and we cant do it in the header decode as init isnt called for mpeg4 there yet */ + /* set dequantizer, we can't do it during init as it might change for mpeg4 + and we can't do it in the header decode as init is not called for mpeg4 there yet */ if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){ s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra; s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter; - }else if(s->out_format == FMT_H263){ + }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){ s->dct_unquantize_intra = s->dct_unquantize_h263_intra; s->dct_unquantize_inter = s->dct_unquantize_h263_inter; }else{ @@ -1285,7 +935,7 @@ alloc: update_noise_reduction(s); } - + #ifdef HAVE_XVMC if(s->avctx->xvmc_acceleration) return XVMC_field_start(s, avctx); @@ -1304,15 +954,16 @@ void 
MPV_frame_end(MpegEncContext *s) XVMC_field_end(s); }else #endif - if(s->unrestricted_mv && s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) { - draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH ); - draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2); - draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2); + if(s->unrestricted_mv && s->current_picture.reference && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) { + s->dsp.draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH ); + s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2); + s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2); } emms_c(); - + s->last_pict_type = s->pict_type; - if(s->pict_type!=B_TYPE){ + s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality; + if(s->pict_type!=FF_B_TYPE){ s->last_non_b_pict_type= s->pict_type; } #if 0 @@ -1321,13 +972,13 @@ void MPV_frame_end(MpegEncContext *s) if(s->picture[i].data[0] == s->current_picture.data[0]){ s->picture[i]= s->current_picture; break; - } + } } assert(iencoding){ - /* release non refernce frames */ + /* release non-reference frames */ for(i=0; ipicture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){ s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]); @@ -1340,6 +991,7 @@ void MPV_frame_end(MpegEncContext *s) memset(&s->next_picture, 0, sizeof(Picture)); memset(&s->current_picture, 0, sizeof(Picture)); #endif + s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr; } /** @@ -1350,39 +1002,43 @@ void MPV_frame_end(MpegEncContext *s) * @param color color of the arrow */ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){ - int t, x, y, f; - - sx= clip(sx, 0, w-1); - sy= clip(sy, 0, h-1); - ex= clip(ex, 0, w-1); - ey= clip(ey, 0, h-1); - + int x, y, fr, f; + + sx= av_clip(sx, 0, w-1); + sy= av_clip(sy, 0, h-1); + ex= av_clip(ex, 0, w-1); + ey= av_clip(ey, 0, h-1); + buf[sy*stride + sx]+= color; - - if(ABS(ex - sx) > ABS(ey - sy)){ + + if(FFABS(ex - sx) > FFABS(ey - sy)){ if(sx > ex){ - t=sx; sx=ex; ex=t; - t=sy; sy=ey; ey=t; + FFSWAP(int, sx, ex); + FFSWAP(int, sy, ey); } buf+= sx + sy*stride; ex-= sx; f= ((ey-sy)<<16)/ex; for(x= 0; x <= ex; x++){ - y= ((x*f) + (1<<15))>>16; - buf[y*stride + x]+= color; + y = (x*f)>>16; + fr= (x*f)&0xFFFF; + buf[ y *stride + x]+= (color*(0x10000-fr))>>16; + buf[(y+1)*stride + x]+= (color* fr )>>16; } }else{ if(sy > ey){ - t=sx; sx=ex; ex=t; - t=sy; sy=ey; ey=t; + FFSWAP(int, sx, ex); + FFSWAP(int, sy, ey); } buf+= sx + sy*stride; ey-= sy; if(ey) f= ((ex-sx)<<16)/ey; else f= 0; for(y= 0; y <= ey; y++){ - x= ((y*f) + (1<<15))>>16; - buf[y*stride + x]+= color; + x = (y*f)>>16; + fr= (y*f)&0xFFFF; + buf[y*stride + x ]+= (color*(0x10000-fr))>>16; + buf[y*stride + x+1]+= (color* fr )>>16; } } } @@ -1394,26 +1050,26 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h * @param stride stride/linesize of the image * @param color color of the arrow */ -static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){ +static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){ int 
dx,dy; - sx= clip(sx, -100, w+100); - sy= clip(sy, -100, h+100); - ex= clip(ex, -100, w+100); - ey= clip(ey, -100, h+100); - + sx= av_clip(sx, -100, w+100); + sy= av_clip(sy, -100, h+100); + ex= av_clip(ex, -100, w+100); + ey= av_clip(ey, -100, h+100); + dx= ex - sx; dy= ey - sy; - + if(dx*dx + dy*dy > 3*3){ int rx= dx + dy; int ry= -dx + dy; int length= ff_sqrt((rx*rx + ry*ry)<<8); - + //FIXME subpixel accuracy rx= ROUNDED_DIV(rx*3<<4, length); ry= ROUNDED_DIV(ry*3<<4, length); - + draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color); draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color); } @@ -1429,7 +1085,7 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){ if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){ int x,y; - + av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: "); switch (pict->pict_type) { case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break; @@ -1437,7 +1093,7 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){ case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break; case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break; case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break; - case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break; + case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break; } for(y=0; ymb_height; y++){ for(x=0; xmb_width; x++){ @@ -1478,20 +1134,20 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){ assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1)); av_log(s->avctx, AV_LOG_DEBUG, "X"); } - + //segmentation if(IS_8X8(mb_type)) av_log(s->avctx, AV_LOG_DEBUG, "+"); else if(IS_16X8(mb_type)) av_log(s->avctx, AV_LOG_DEBUG, "-"); else if(IS_8X16(mb_type)) - av_log(s->avctx, AV_LOG_DEBUG, "¦"); + av_log(s->avctx, AV_LOG_DEBUG, "|"); else if(IS_INTRA(mb_type) || IS_16X16(mb_type)) av_log(s->avctx, AV_LOG_DEBUG, " "); else av_log(s->avctx, AV_LOG_DEBUG, "?"); - - + + if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264) av_log(s->avctx, AV_LOG_DEBUG, "="); else @@ -1508,16 +1164,21 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){ int mb_y; uint8_t *ptr; int i; - int h_chroma_shift, v_chroma_shift; + int h_chroma_shift, v_chroma_shift, block_height; + const int width = s->avctx->width; + const int height= s->avctx->height; + const int mv_sample_log2= 4 - pict->motion_subsample_log2; + const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1); s->low_delay=0; //needed to see the vectors without trashing the buffers avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift); for(i=0; i<3; i++){ - memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*s->height:pict->linesize[i]*s->height >> v_chroma_shift); + memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? 
pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift); pict->data[i]= s->visualization_buffer[i]; } pict->type= FF_BUFFER_TYPE_COPY; ptr= pict->data[0]; + block_height = 16>>v_chroma_shift; for(mb_y=0; mb_ymb_height; mb_y++){ int mb_x; @@ -1526,7 +1187,7 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){ if((s->avctx->debug_mv) && pict->motion_val){ int type; for(type=0; type<3; type++){ - int direction; + int direction = 0; switch (type) { case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE)) continue; @@ -1549,37 +1210,55 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){ for(i=0; i<4; i++){ int sx= mb_x*16 + 4 + 8*(i&1); int sy= mb_y*16 + 4 + 8*(i>>1); - int xy= 1 + mb_x*2 + (i&1) + (mb_y*2 + 1 + (i>>1))*(s->mb_width*2 + 2); + int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1); int mx= (pict->motion_val[direction][xy][0]>>shift) + sx; int my= (pict->motion_val[direction][xy][1]>>shift) + sy; - draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100); + draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100); } }else if(IS_16X8(pict->mb_type[mb_index])){ int i; for(i=0; i<2; i++){ int sx=mb_x*16 + 8; int sy=mb_y*16 + 4 + 8*i; - int xy=1 + mb_x*2 + (mb_y*2 + 1 + i)*(s->mb_width*2 + 2); - int mx=(pict->motion_val[direction][xy][0]>>shift) + sx; - int my=(pict->motion_val[direction][xy][1]>>shift) + sy; - draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100); + int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1); + int mx=(pict->motion_val[direction][xy][0]>>shift); + int my=(pict->motion_val[direction][xy][1]>>shift); + + if(IS_INTERLACED(pict->mb_type[mb_index])) + my*=2; + + draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100); + } + }else if(IS_8X16(pict->mb_type[mb_index])){ + int i; + for(i=0; i<2; i++){ + int sx=mb_x*16 + 4 + 8*i; + int sy=mb_y*16 + 8; + int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1); + int mx=(pict->motion_val[direction][xy][0]>>shift); + int my=(pict->motion_val[direction][xy][1]>>shift); + + if(IS_INTERLACED(pict->mb_type[mb_index])) + my*=2; + + draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100); } }else{ int sx= mb_x*16 + 8; int sy= mb_y*16 + 8; - int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2); + int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2; int mx= (pict->motion_val[direction][xy][0]>>shift) + sx; int my= (pict->motion_val[direction][xy][1]>>shift) + sy; - draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100); + draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100); } - } + } } if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){ uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL; int y; - for(y=0; y<8; y++){ - *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= c; - *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= c; + for(y=0; ydata[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c; + *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c; } } if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){ @@ -1590,7 +1269,7 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){ u= (int)(128 + r*cos(theta*3.141592/180));\ v= (int)(128 + r*sin(theta*3.141592/180)); - + u=v=128; if(IS_PCM(mb_type)){ COLOR(120,48) @@ -1619,9 +1298,9 @@ v= (int)(128 + r*sin(theta*3.141592/180)); u*= 0x0101010101010101ULL; 
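The debug overlay above fills eight horizontally adjacent pixels at once by multiplying a single byte by 0x0101010101010101ULL, which replicates that byte into every lane of a 64-bit word. A small stand-alone illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Replicate one byte into all eight bytes of a 64-bit word, the trick used
 * above to store a constant colour into 8 neighbouring pixels per write. */
int main(void)
{
    uint8_t  value = 0x2a;
    uint64_t splat = value * UINT64_C(0x0101010101010101);
    uint8_t  row[8];

    memcpy(row, &splat, sizeof(row));   /* same effect as the *(uint64_t*) store */
    for (int i = 0; i < 8; i++)
        printf("%02x ", row[i]);
    printf("\n");
    return 0;
}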
v*= 0x0101010101010101ULL; - for(y=0; y<8; y++){ - *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= u; - *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= v; + for(y=0; ydata[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u; + *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v; } //segmentation @@ -1633,7 +1312,22 @@ v= (int)(128 + r*sin(theta*3.141592/180)); for(y=0; y<16; y++) pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80; } - + if(IS_8X8(mb_type) && mv_sample_log2 >= 2){ + int dm= 1 << (mv_sample_log2-2); + for(i=0; i<4; i++){ + int sx= mb_x*16 + 8*(i&1); + int sy= mb_y*16 + 8*(i>>1); + int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1); + //FIXME bidir + int32_t *mv = (int32_t*)&pict->motion_val[0][xy]; + if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)]) + for(y=0; y<8; y++) + pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80; + if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)]) + *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL; + } + } + if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){ // hmm } @@ -1644,931 +1338,197 @@ v= (int)(128 + r*sin(theta*3.141592/180)); } } -#ifdef CONFIG_ENCODERS - -static int get_sae(uint8_t *src, int ref, int stride){ - int x,y; - int acc=0; - - for(y=0; y<16; y++){ - for(x=0; x<16; x++){ - acc+= ABS(src[x+y*stride] - ref); - } - } - - return acc; -} - -static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){ - int x, y, w, h; - int acc=0; - - w= s->width &~15; - h= s->height&~15; - - for(y=0; ydsp.sad[0](NULL, src + offset, ref + offset, stride, 16); - int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8; - int sae = get_sae(src + offset, mean, stride); - - acc+= sae + 500 < sad; - } - } - return acc; -} - - -static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){ - AVFrame *pic=NULL; - int i; - const int encoding_delay= s->max_b_frames; - int direct=1; - - if(pic_arg){ - if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0; - if(pic_arg->linesize[0] != s->linesize) direct=0; - if(pic_arg->linesize[1] != s->uvlinesize) direct=0; - if(pic_arg->linesize[2] != s->uvlinesize) direct=0; - -// av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize); - - if(direct){ - i= ff_find_unused_picture(s, 1); - - pic= (AVFrame*)&s->picture[i]; - pic->reference= 3; - - for(i=0; i<4; i++){ - pic->data[i]= pic_arg->data[i]; - pic->linesize[i]= pic_arg->linesize[i]; - } - alloc_picture(s, (Picture*)pic, 1); - }else{ - int offset= 16; - i= ff_find_unused_picture(s, 0); - - pic= (AVFrame*)&s->picture[i]; - pic->reference= 3; - - alloc_picture(s, (Picture*)pic, 0); - - if( pic->data[0] + offset == pic_arg->data[0] - && pic->data[1] + offset == pic_arg->data[1] - && pic->data[2] + offset == pic_arg->data[2]){ - // empty - }else{ - int h_chroma_shift, v_chroma_shift; - avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift); - - for(i=0; i<3; i++){ - int src_stride= pic_arg->linesize[i]; - int dst_stride= i ? s->uvlinesize : s->linesize; - int h_shift= i ? h_chroma_shift : 0; - int v_shift= i ? 
v_chroma_shift : 0;
-                    int w= s->width >>h_shift;
-                    int h= s->height>>v_shift;
-                    uint8_t *src= pic_arg->data[i];
-                    uint8_t *dst= pic->data[i] + offset;
-
-                    if(src_stride==dst_stride)
-                        memcpy(dst, src, src_stride*h);
-                    else{
-                        while(h--){
-                            memcpy(dst, src, w);
-                            dst += dst_stride;
-                            src += src_stride;
-                        }
-                    }
-                }
-            }
-        }
-        copy_picture_attributes(pic, pic_arg);
-
-        pic->display_picture_number= s->input_picture_number++;
-    }
-
-    /* shift buffer entries */
-    for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
-        s->input_picture[i-1]= s->input_picture[i];
-
-    s->input_picture[encoding_delay]= (Picture*)pic;
-
-    return 0;
-}
-
-static void select_input_picture(MpegEncContext *s){
-    int i;
-
-    for(i=1; i<MAX_PICTURE_COUNT; i++)
-        s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
-    s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
-
-    /* set next picture types & ordering */
-    if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
-        if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
-            s->reordered_input_picture[0]= s->input_picture[0];
-            s->reordered_input_picture[0]->pict_type= I_TYPE;
-            s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
-        }else{
-            int b_frames;
-
-            if(s->flags&CODEC_FLAG_PASS2){
-                for(i=0; i<s->max_b_frames+1; i++){
-                    int pict_num= s->input_picture[0]->display_picture_number + i;
-                    int pict_type= s->rc_context.entry[pict_num].new_pict_type;
-                    s->input_picture[i]->pict_type= pict_type;
-
-                    if(i + 1 >= s->rc_context.num_entries) break;
-                }
-            }
-
-            if(s->input_picture[0]->pict_type){
-                /* user selected pict_type */
-                for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
-                    if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
-                }
-
-                if(b_frames > s->max_b_frames){
-                    av_log(s->avctx, AV_LOG_ERROR, "warning, too many bframes in a row\n");
-                    b_frames = s->max_b_frames;
-                }
-            }else if(s->avctx->b_frame_strategy==0){
-                b_frames= s->max_b_frames;
-                while(b_frames && !s->input_picture[b_frames]) b_frames--;
-            }else if(s->avctx->b_frame_strategy==1){
-                for(i=1; i<s->max_b_frames+1; i++){
-                    if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
-                        s->input_picture[i]->b_frame_score=
-                            get_intra_count(s, s->input_picture[i  ]->data[0],
-                                               s->input_picture[i-1]->data[0], s->linesize) + 1;
-                    }
-                }
-                for(i=0; i<s->max_b_frames; i++){
-                    if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
-                }
-
-                b_frames= FFMAX(0, i-1);
-
-                /* reset scores */
-                for(i=0; i<b_frames+1; i++){
-                    s->input_picture[i]->b_frame_score=0;
-                }
-            }else{
-                av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
-                b_frames=0;
-            }
-
-            emms_c();
-//static int b_count=0;
-//b_count+= b_frames;
-//av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
-            if(s->picture_in_gop_number + b_frames >= s->gop_size){
-                if(s->flags & CODEC_FLAG_CLOSED_GOP)
-                    b_frames=0;
-                s->input_picture[b_frames]->pict_type= I_TYPE;
-            }
-
-            if( (s->flags & CODEC_FLAG_CLOSED_GOP)
-               && b_frames
-               && s->input_picture[b_frames]->pict_type== I_TYPE)
-                b_frames--;
-
-            s->reordered_input_picture[0]= s->input_picture[b_frames];
-            if(s->reordered_input_picture[0]->pict_type != I_TYPE)
-                s->reordered_input_picture[0]->pict_type= P_TYPE;
-            s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
-            for(i=0; i<b_frames; i++){
-                s->reordered_input_picture[i+1]= s->input_picture[i];
-                s->reordered_input_picture[i+1]->pict_type= B_TYPE;
-                s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
-            }
-        }
-    }
-
-    if(s->reordered_input_picture[0]){
-
s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0; - - copy_picture(&s->new_picture, s->reordered_input_picture[0]); - - if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){ - // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable - - int i= ff_find_unused_picture(s, 0); - Picture *pic= &s->picture[i]; - - /* mark us unused / free shared pic */ - for(i=0; i<4; i++) - s->reordered_input_picture[0]->data[i]= NULL; - s->reordered_input_picture[0]->type= 0; - - copy_picture_attributes((AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]); - pic->reference = s->reordered_input_picture[0]->reference; - - alloc_picture(s, pic, 0); - - s->current_picture_ptr= pic; - }else{ - // input is not a shared pix -> reuse buffer for current_pix - - assert( s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER - || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL); - - s->current_picture_ptr= s->reordered_input_picture[0]; - for(i=0; i<4; i++){ - s->new_picture.data[i]+=16; - } - } - copy_picture(&s->current_picture, s->current_picture_ptr); - - s->picture_number= s->new_picture.display_picture_number; -//printf("dpn:%d\n", s->picture_number); - }else{ - memset(&s->new_picture, 0, sizeof(Picture)); - } -} - -int MPV_encode_picture(AVCodecContext *avctx, - unsigned char *buf, int buf_size, void *data) -{ - MpegEncContext *s = avctx->priv_data; - AVFrame *pic_arg = data; - int i, stuffing_count; - - if(avctx->pix_fmt != PIX_FMT_YUV420P){ - av_log(avctx, AV_LOG_ERROR, "this codec supports only YUV420P\n"); - return -1; - } - - init_put_bits(&s->pb, buf, buf_size); - - s->picture_in_gop_number++; - - load_input_picture(s, pic_arg); - - select_input_picture(s); - - /* output? 
*/ - if(s->new_picture.data[0]){ - s->pict_type= s->new_picture.pict_type; -//emms_c(); -//printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale); - MPV_frame_start(s, avctx); - - encode_picture(s, s->picture_number); - - avctx->real_pict_num = s->picture_number; - avctx->header_bits = s->header_bits; - avctx->mv_bits = s->mv_bits; - avctx->misc_bits = s->misc_bits; - avctx->i_tex_bits = s->i_tex_bits; - avctx->p_tex_bits = s->p_tex_bits; - avctx->i_count = s->i_count; - avctx->p_count = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx - avctx->skip_count = s->skip_count; - - MPV_frame_end(s); - - if (s->out_format == FMT_MJPEG) - mjpeg_picture_trailer(s); - - if(s->flags&CODEC_FLAG_PASS1) - ff_write_pass1_stats(s); - - for(i=0; i<4; i++){ - avctx->error[i] += s->current_picture_ptr->error[i]; - } - - flush_put_bits(&s->pb); - s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8; - - stuffing_count= ff_vbv_update(s, s->frame_bits); - if(stuffing_count){ - switch(s->codec_id){ - case CODEC_ID_MPEG1VIDEO: - case CODEC_ID_MPEG2VIDEO: - while(stuffing_count--){ - put_bits(&s->pb, 8, 0); - } - break; - case CODEC_ID_MPEG4: - put_bits(&s->pb, 16, 0); - put_bits(&s->pb, 16, 0x1C3); - stuffing_count -= 4; - while(stuffing_count--){ - put_bits(&s->pb, 8, 0xFF); - } - break; - default: - av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n"); - } - flush_put_bits(&s->pb); - s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8; - } - - /* update mpeg1/2 vbv_delay for CBR */ - if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate){ - int vbv_delay; - - assert(s->repeat_first_field==0); - - vbv_delay= lrintf(90000 * s->rc_context.buffer_index / s->avctx->rc_max_rate); - assert(vbv_delay < 0xFFFF); - - s->vbv_delay_ptr[0] &= 0xF8; - s->vbv_delay_ptr[0] |= vbv_delay>>13; - s->vbv_delay_ptr[1] = vbv_delay>>5; - s->vbv_delay_ptr[2] &= 0x07; - s->vbv_delay_ptr[2] |= vbv_delay<<3; - } - s->total_bits += s->frame_bits; - avctx->frame_bits = s->frame_bits; - }else{ - assert((pbBufPtr(&s->pb) == s->pb.buf)); - s->frame_bits=0; - } - assert((s->frame_bits&7)==0); - - return s->frame_bits/8; -} - -#endif //CONFIG_ENCODERS - -static inline void gmc1_motion(MpegEncContext *s, - uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, - int dest_offset, - uint8_t **ref_picture, int src_offset) -{ - uint8_t *ptr; - int offset, src_x, src_y, linesize, uvlinesize; - int motion_x, motion_y; - int emu=0; - - motion_x= s->sprite_offset[0][0]; - motion_y= s->sprite_offset[0][1]; - src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1)); - src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1)); - motion_x<<=(3-s->sprite_warping_accuracy); - motion_y<<=(3-s->sprite_warping_accuracy); - src_x = clip(src_x, -16, s->width); - if (src_x == s->width) - motion_x =0; - src_y = clip(src_y, -16, s->height); - if (src_y == s->height) - motion_y =0; - - linesize = s->linesize; - uvlinesize = s->uvlinesize; - - ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset; - - dest_y+=dest_offset; - if(s->flags&CODEC_FLAG_EMU_EDGE){ - if( (unsigned)src_x >= s->h_edge_pos - 17 - || (unsigned)src_y >= s->v_edge_pos - 17){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos); - ptr= s->edge_emu_buffer; - } - } - - if((motion_x|motion_y)&7){ - s->dsp.gmc1(dest_y , ptr , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding); - s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, 
motion_y&15, 128 - s->no_rounding); - }else{ - int dxy; - - dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2); - if (s->no_rounding){ - s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16); - }else{ - s->dsp.put_pixels_tab [0][dxy](dest_y, ptr, linesize, 16); - } - } - - if(s->flags&CODEC_FLAG_GRAY) return; - - motion_x= s->sprite_offset[1][0]; - motion_y= s->sprite_offset[1][1]; - src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1)); - src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1)); - motion_x<<=(3-s->sprite_warping_accuracy); - motion_y<<=(3-s->sprite_warping_accuracy); - src_x = clip(src_x, -8, s->width>>1); - if (src_x == s->width>>1) - motion_x =0; - src_y = clip(src_y, -8, s->height>>1); - if (src_y == s->height>>1) - motion_y =0; - - offset = (src_y * uvlinesize) + src_x + (src_offset>>1); - ptr = ref_picture[1] + offset; - if(s->flags&CODEC_FLAG_EMU_EDGE){ - if( (unsigned)src_x >= (s->h_edge_pos>>1) - 9 - || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); - ptr= s->edge_emu_buffer; - emu=1; - } - } - s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding); - - ptr = ref_picture[2] + offset; - if(emu){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); - ptr= s->edge_emu_buffer; - } - s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding); - - return; -} - -static inline void gmc_motion(MpegEncContext *s, - uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, - int dest_offset, - uint8_t **ref_picture, int src_offset) -{ - uint8_t *ptr; - int linesize, uvlinesize; - const int a= s->sprite_warping_accuracy; - int ox, oy; - - linesize = s->linesize; - uvlinesize = s->uvlinesize; - - ptr = ref_picture[0] + src_offset; - - dest_y+=dest_offset; - - ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16; - oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16; - - s->dsp.gmc(dest_y, ptr, linesize, 16, - ox, - oy, - s->sprite_delta[0][0], s->sprite_delta[0][1], - s->sprite_delta[1][0], s->sprite_delta[1][1], - a+1, (1<<(2*a+1)) - s->no_rounding, - s->h_edge_pos, s->v_edge_pos); - s->dsp.gmc(dest_y+8, ptr, linesize, 16, - ox + s->sprite_delta[0][0]*8, - oy + s->sprite_delta[1][0]*8, - s->sprite_delta[0][0], s->sprite_delta[0][1], - s->sprite_delta[1][0], s->sprite_delta[1][1], - a+1, (1<<(2*a+1)) - s->no_rounding, - s->h_edge_pos, s->v_edge_pos); - - if(s->flags&CODEC_FLAG_GRAY) return; - - - dest_cb+=dest_offset>>1; - dest_cr+=dest_offset>>1; - - ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8; - oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8; - - ptr = ref_picture[1] + (src_offset>>1); - s->dsp.gmc(dest_cb, ptr, uvlinesize, 8, - ox, - oy, - s->sprite_delta[0][0], s->sprite_delta[0][1], - s->sprite_delta[1][0], s->sprite_delta[1][1], - a+1, (1<<(2*a+1)) - s->no_rounding, - s->h_edge_pos>>1, s->v_edge_pos>>1); - - ptr = ref_picture[2] + (src_offset>>1); - s->dsp.gmc(dest_cr, ptr, uvlinesize, 8, - ox, - oy, - s->sprite_delta[0][0], s->sprite_delta[0][1], - s->sprite_delta[1][0], s->sprite_delta[1][1], - a+1, (1<<(2*a+1)) - s->no_rounding, - s->h_edge_pos>>1, s->v_edge_pos>>1); -} - -/** - * Copies a 
rectangular area of samples to a temporary buffer and replicates the boarder samples. - * @param buf destination buffer - * @param src source buffer - * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers - * @param block_w width of block - * @param block_h height of block - * @param src_x x coordinate of the top left sample of the block in the source buffer - * @param src_y y coordinate of the top left sample of the block in the source buffer - * @param w width of the source buffer - * @param h height of the source buffer - */ -void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h, - int src_x, int src_y, int w, int h){ - int x, y; - int start_y, start_x, end_y, end_x; - - if(src_y>= h){ - src+= (h-1-src_y)*linesize; - src_y=h-1; - }else if(src_y<=-block_h){ - src+= (1-block_h-src_y)*linesize; - src_y=1-block_h; - } - if(src_x>= w){ - src+= (w-1-src_x); - src_x=w-1; - }else if(src_x<=-block_w){ - src+= (1-block_w-src_x); - src_x=1-block_w; - } - - start_y= FFMAX(0, -src_y); - start_x= FFMAX(0, -src_x); - end_y= FFMIN(block_h, h-src_y); - end_x= FFMIN(block_w, w-src_x); - - // copy existing part - for(y=start_y; yavctx->lowres; + const int s_mask= (2<quarter_sample){ + motion_x/=2; + motion_y/=2; + } + + sx= motion_x & s_mask; + sy= motion_y & s_mask; + src_x += motion_x >> (lowres+1); + src_y += motion_y >> (lowres+1); - dxy = ((motion_y & 1) << 1) | (motion_x & 1); - src_x += motion_x >> 1; - src_y += motion_y >> 1; - - /* WARNING: do no forget half pels */ - src_x = clip(src_x, -16, width); //FIXME unneeded for emu? - if (src_x == width) - dxy &= ~1; - src_y = clip(src_y, -16, height); - if (src_y == height) - dxy &= ~2; src += src_y * stride + src_x; - if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){ - if( (unsigned)src_x > h_edge_pos - (motion_x&1) - w - || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){ - ff_emulated_edge_mc(s->edge_emu_buffer, src, stride, w+1, h+1, - src_x, src_y, h_edge_pos, v_edge_pos); - src= s->edge_emu_buffer; - emu=1; - } + if( (unsigned)src_x > h_edge_pos - (!!sx) - w + || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){ + ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<edge_emu_buffer; + emu=1; } - pix_op[dxy](dest, src, stride, h); + + sx <<= 2 - lowres; + sy <<= 2 - lowres; + if(field_select) + src += s->linesize; + pix_op[lowres](dest, src, stride, h, sx, sy); return emu; } /* apply one mpeg motion vector to the three components */ -static inline void mpeg_motion(MpegEncContext *s, +static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, - int dest_offset, - uint8_t **ref_picture, int src_offset, - int field_based, op_pixels_func (*pix_op)[4], + int field_based, int bottom_field, int field_select, + uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h) { - uint8_t *ptr; - int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, uvlinesize; - int emu=0; -#if 0 -if(s->quarter_sample) -{ - motion_x>>=1; - motion_y>>=1; -} -#endif - - height = s->height >> field_based; - v_edge_pos = s->v_edge_pos >> field_based; + uint8_t *ptr_y, *ptr_cb, *ptr_cr; + int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy; + const int lowres= s->avctx->lowres; + const int block_s= 8>>lowres; + const int s_mask= (2<h_edge_pos >> lowres; + const int v_edge_pos = s->v_edge_pos >> lowres; + linesize = 
s->current_picture.linesize[0] << field_based; uvlinesize = s->current_picture.linesize[1] << field_based; - emu= hpel_motion(s, - dest_y + dest_offset, ref_picture[0] + src_offset, - s->mb_x * 16, s->mb_y * (16 >> field_based), - s->width, height, s->current_picture.linesize[0] << field_based, - s->h_edge_pos, v_edge_pos, - 16, h, pix_op[0], - motion_x, motion_y); + if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway + motion_x/=2; + motion_y/=2; + } + if(field_based){ + motion_y += (bottom_field - field_select)*((1<flags&CODEC_FLAG_GRAY) return; + sx= motion_x & s_mask; + sy= motion_y & s_mask; + src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1)); + src_y =(s->mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1)); if (s->out_format == FMT_H263) { - dxy = 0; - if ((motion_x & 3) != 0) - dxy |= 1; - if ((motion_y & 3) != 0) - dxy |= 2; - mx = motion_x >> 2; - my = motion_y >> 2; + uvsx = ((motion_x>>1) & s_mask) | (sx&1); + uvsy = ((motion_y>>1) & s_mask) | (sy&1); + uvsrc_x = src_x>>1; + uvsrc_y = src_y>>1; + }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261 + mx = motion_x / 4; + my = motion_y / 4; + uvsx = (2*mx) & s_mask; + uvsy = (2*my) & s_mask; + uvsrc_x = s->mb_x*block_s + (mx >> lowres); + uvsrc_y = s->mb_y*block_s + (my >> lowres); } else { mx = motion_x / 2; my = motion_y / 2; - dxy = ((my & 1) << 1) | (mx & 1); - mx >>= 1; - my >>= 1; + uvsx = mx & s_mask; + uvsy = my & s_mask; + uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1)); + uvsrc_y =(s->mb_y*block_s>>field_based) + (my >> (lowres+1)); } - - src_x = s->mb_x * 8 + mx; - src_y = s->mb_y * (8 >> field_based) + my; - src_x = clip(src_x, -8, s->width >> 1); - if (src_x == (s->width >> 1)) - dxy &= ~1; - src_y = clip(src_y, -8, height >> 1); - if (src_y == (height >> 1)) - dxy &= ~2; - offset = (src_y * uvlinesize) + src_x + (src_offset >> 1); - ptr = ref_picture[1] + offset; - if(emu){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, - src_x, src_y<h_edge_pos>>1, s->v_edge_pos>>1); - ptr= s->edge_emu_buffer + (src_offset >> 1); - } - pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1); - ptr = ref_picture[2] + offset; - if(emu){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, - src_x, src_y<h_edge_pos>>1, s->v_edge_pos>>1); - ptr= s->edge_emu_buffer + (src_offset >> 1); + ptr_y = ref_picture[0] + src_y * linesize + src_x; + ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x; + ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x; + + if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s + || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){ + ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based, + src_x, src_y<edge_emu_buffer; + if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ + uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize; + ff_emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based, + uvsrc_x, uvsrc_y<>1, v_edge_pos>>1); + ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based, + uvsrc_x, uvsrc_y<>1, v_edge_pos>>1); + ptr_cb= uvbuf; + ptr_cr= uvbuf+16; + } } - pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1); -} -//FIXME move to dsputil, avg variant, 16x16 version -static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){ - int x; - uint8_t * const top = src[1]; - uint8_t * const left = src[2]; - uint8_t * const mid = src[0]; - 
uint8_t * const right = src[3]; - uint8_t * const bottom= src[4]; -#define OBMC_FILTER(x, t, l, m, r, b)\ - dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3 -#define OBMC_FILTER4(x, t, l, m, r, b)\ - OBMC_FILTER(x , t, l, m, r, b);\ - OBMC_FILTER(x+1 , t, l, m, r, b);\ - OBMC_FILTER(x +stride, t, l, m, r, b);\ - OBMC_FILTER(x+1+stride, t, l, m, r, b); - - x=0; - OBMC_FILTER (x , 2, 2, 4, 0, 0); - OBMC_FILTER (x+1, 2, 1, 5, 0, 0); - OBMC_FILTER4(x+2, 2, 1, 5, 0, 0); - OBMC_FILTER4(x+4, 2, 0, 5, 1, 0); - OBMC_FILTER (x+6, 2, 0, 5, 1, 0); - OBMC_FILTER (x+7, 2, 0, 4, 2, 0); - x+= stride; - OBMC_FILTER (x , 1, 2, 5, 0, 0); - OBMC_FILTER (x+1, 1, 2, 5, 0, 0); - OBMC_FILTER (x+6, 1, 0, 5, 2, 0); - OBMC_FILTER (x+7, 1, 0, 5, 2, 0); - x+= stride; - OBMC_FILTER4(x , 1, 2, 5, 0, 0); - OBMC_FILTER4(x+2, 1, 1, 6, 0, 0); - OBMC_FILTER4(x+4, 1, 0, 6, 1, 0); - OBMC_FILTER4(x+6, 1, 0, 5, 2, 0); - x+= 2*stride; - OBMC_FILTER4(x , 0, 2, 5, 0, 1); - OBMC_FILTER4(x+2, 0, 1, 6, 0, 1); - OBMC_FILTER4(x+4, 0, 0, 6, 1, 1); - OBMC_FILTER4(x+6, 0, 0, 5, 2, 1); - x+= 2*stride; - OBMC_FILTER (x , 0, 2, 5, 0, 1); - OBMC_FILTER (x+1, 0, 2, 5, 0, 1); - OBMC_FILTER4(x+2, 0, 1, 5, 0, 2); - OBMC_FILTER4(x+4, 0, 0, 5, 1, 2); - OBMC_FILTER (x+6, 0, 0, 5, 2, 1); - OBMC_FILTER (x+7, 0, 0, 5, 2, 1); - x+= stride; - OBMC_FILTER (x , 0, 2, 4, 0, 2); - OBMC_FILTER (x+1, 0, 1, 5, 0, 2); - OBMC_FILTER (x+6, 0, 0, 5, 1, 2); - OBMC_FILTER (x+7, 0, 0, 4, 2, 2); + + if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data + dest_y += s->linesize; + dest_cb+= s->uvlinesize; + dest_cr+= s->uvlinesize; + } + + if(field_select){ + ptr_y += s->linesize; + ptr_cb+= s->uvlinesize; + ptr_cr+= s->uvlinesize; + } + + sx <<= 2 - lowres; + sy <<= 2 - lowres; + pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy); + + if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ + uvsx <<= 2 - lowres; + uvsy <<= 2 - lowres; + pix_op[lowres](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy); + pix_op[lowres](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy); + } + //FIXME h261 lowres loop filter } -/* obmc for 1 8x8 luma block */ -static inline void obmc_motion(MpegEncContext *s, - uint8_t *dest, uint8_t *src, - int src_x, int src_y, - op_pixels_func *pix_op, - int16_t mv[5][2]/* mid top left right bottom*/) -#define MID 0 -{ - int i; - uint8_t *ptr[5]; - - assert(s->quarter_sample==0); - - for(i=0; i<5; i++){ - if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){ - ptr[i]= ptr[MID]; - }else{ - ptr[i]= s->edge_emu_buffer + 16 + 8*(i&1) + s->linesize*8*(i>>1); - hpel_motion(s, ptr[i], src, - src_x, src_y, - s->width, s->height, s->linesize, - s->h_edge_pos, s->v_edge_pos, - 8, 8, pix_op, - mv[i][0], mv[i][1]); - } - } - - put_obmc(dest, ptr, s->linesize); -} - -static inline void qpel_motion(MpegEncContext *s, - uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, - int dest_offset, - uint8_t **ref_picture, int src_offset, - int field_based, op_pixels_func (*pix_op)[4], - qpel_mc_func (*qpix_op)[16], - int motion_x, int motion_y, int h) -{ - uint8_t *ptr; - int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize; - int emu=0; - - dxy = ((motion_y & 3) << 2) | (motion_x & 3); - src_x = s->mb_x * 16 + (motion_x >> 2); - src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2); - - height = s->height >> field_based; - v_edge_pos = s->v_edge_pos >> field_based; - src_x = clip(src_x, -16, s->width); - if (src_x == s->width) - dxy &= ~3; 
- src_y = clip(src_y, -16, height); - if (src_y == height) - dxy &= ~12; - linesize = s->linesize << field_based; - uvlinesize = s->uvlinesize << field_based; - ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset; - dest_y += dest_offset; -//printf("%d %d %d\n", src_x, src_y, dxy); - - if(s->flags&CODEC_FLAG_EMU_EDGE){ - if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16 - || (unsigned)src_y > v_edge_pos - (motion_y&3) - h ){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based, - src_x, src_y<h_edge_pos, s->v_edge_pos); - ptr= s->edge_emu_buffer + src_offset; - emu=1; - } - } - if(!field_based) - qpix_op[0][dxy](dest_y, ptr, linesize); - else{ - //damn interlaced mode - //FIXME boundary mirroring is not exactly correct here - qpix_op[1][dxy](dest_y , ptr , linesize); - qpix_op[1][dxy](dest_y+8, ptr+8, linesize); - } - - if(s->flags&CODEC_FLAG_GRAY) return; - - if(field_based){ - mx= motion_x/2; - my= motion_y>>1; - }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){ - static const int rtab[8]= {0,0,1,1,0,0,0,1}; - mx= (motion_x>>1) + rtab[motion_x&7]; - my= (motion_y>>1) + rtab[motion_y&7]; - }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){ - mx= (motion_x>>1)|(motion_x&1); - my= (motion_y>>1)|(motion_y&1); - }else{ - mx= motion_x/2; - my= motion_y/2; - } - mx= (mx>>1)|(mx&1); - my= (my>>1)|(my&1); - - dxy= (mx&1) | ((my&1)<<1); - mx>>=1; - my>>=1; - - src_x = s->mb_x * 8 + mx; - src_y = s->mb_y * (8 >> field_based) + my; - src_x = clip(src_x, -8, s->width >> 1); - if (src_x == (s->width >> 1)) - dxy &= ~1; - src_y = clip(src_y, -8, height >> 1); - if (src_y == (height >> 1)) - dxy &= ~2; - - offset = (src_y * uvlinesize) + src_x + (src_offset >> 1); - ptr = ref_picture[1] + offset; - if(emu){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, - src_x, src_y<h_edge_pos>>1, s->v_edge_pos>>1); - ptr= s->edge_emu_buffer + (src_offset >> 1); - } - pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1); - - ptr = ref_picture[2] + offset; - if(emu){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, - src_x, src_y<h_edge_pos>>1, s->v_edge_pos>>1); - ptr= s->edge_emu_buffer + (src_offset >> 1); - } - pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1); -} - -inline int ff_h263_round_chroma(int x){ - if (x >= 0) - return (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1)); - else { - x = -x; - return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1)); - } -} - -/** - * h263 chorma 4mv motion compensation. 
- */
-static inline void chroma_4mv_motion(MpegEncContext *s,
+static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                      uint8_t *dest_cb, uint8_t *dest_cr,
                                      uint8_t **ref_picture,
-                                     op_pixels_func *pix_op,
+                                     h264_chroma_mc_func *pix_op,
                                      int mx, int my){
-    int dxy, emu=0, src_x, src_y, offset;
+    const int lowres= s->avctx->lowres;
+    const int block_s= 8>>lowres;
+    const int s_mask= (2<<lowres)-1;
+    const int h_edge_pos = s->h_edge_pos >> (lowres+1);
+    const int v_edge_pos = s->v_edge_pos >> (lowres+1);
+    int emu=0, src_x, src_y, offset, sx, sy;
     uint8_t *ptr;
-
+
+    if(s->quarter_sample){
+        mx/=2;
+        my/=2;
+    }
+
     /* In case of 8X8, we construct a single chroma motion vector
        with a special rounding */
     mx= ff_h263_round_chroma(mx);
     my= ff_h263_round_chroma(my);
-
-    dxy = ((my & 1) << 1) | (mx & 1);
-    mx >>= 1;
-    my >>= 1;
-    src_x = s->mb_x * 8 + mx;
-    src_y = s->mb_y * 8 + my;
-    src_x = clip(src_x, -8, s->width/2);
-    if (src_x == s->width/2)
-        dxy &= ~1;
-    src_y = clip(src_y, -8, s->height/2);
-    if (src_y == s->height/2)
-        dxy &= ~2;
-
-    offset = (src_y * (s->uvlinesize)) + src_x;
+    sx= mx & s_mask;
+    sy= my & s_mask;
+    src_x = s->mb_x*block_s + (mx >> (lowres+1));
+    src_y = s->mb_y*block_s + (my >> (lowres+1));
+
+    offset = src_y * s->uvlinesize + src_x;
     ptr = ref_picture[1] + offset;
     if(s->flags&CODEC_FLAG_EMU_EDGE){
-        if(   (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8
-           || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){
-            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
+        if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
+           || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
+            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
-    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
+    sx <<= 2 - lowres;
+    sy <<= 2 - lowres;
+    pix_op[lowres](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
 
     ptr = ref_picture[2] + offset;
     if(emu){
-        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
+        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
        ptr= s->edge_emu_buffer;
    }
-    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
+    pix_op[lowres](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
 }
 
 /**
- * motion compesation of a single macroblock
+ * motion compensation of a single macroblock
  * @param s context
  * @param dest_y luma destination pointer
  * @param dest_cb chroma cb/u destination pointer
@@ -2576,308 +1536,125 @@ static inline void chroma_4mv_motion(MpegEncContext *s,
  * @param dir direction (0->forward, 1->backward)
  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
  * @param pic_op halfpel motion compensation function (average or put normally)
- * @param pic_op qpel motion compensation function (average or put normally)
  * the motion vectors are taken from s->mv and the MV type from s->mv_type
  */
-static inline void MPV_motion(MpegEncContext *s,
+static inline void MPV_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
-                              int dir, uint8_t **ref_picture,
-                              op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
+                              int dir, uint8_t **ref_picture,
+                              h264_chroma_mc_func *pix_op)
 {
-    int dxy, mx, my, src_x, src_y, motion_x, motion_y;
+    int mx, my;
     int mb_x, mb_y, i;
-    uint8_t *ptr, *dest;
+    const int lowres= s->avctx->lowres;
+    const int block_s= 8>>lowres;
 
     mb_x = s->mb_x;
     mb_y = s->mb_y;
 
-    if(s->obmc && 
s->pict_type != B_TYPE){ - int16_t mv_cache[4][4][2]; - const int xy= s->mb_x + s->mb_y*s->mb_stride; - const int mot_stride= s->mb_width*2 + 2; - const int mot_xy= 1 + mb_x*2 + (mb_y*2 + 1)*mot_stride; - - assert(!s->mb_skiped); - - memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy ], sizeof(int16_t)*4); - memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4); - memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4); - - if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){ - memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4); - }else{ - memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4); - } - - if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){ - *(int32_t*)mv_cache[1][0]= *(int32_t*)mv_cache[1][1]; - *(int32_t*)mv_cache[2][0]= *(int32_t*)mv_cache[2][1]; - }else{ - *(int32_t*)mv_cache[1][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1]; - *(int32_t*)mv_cache[2][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1+mot_stride]; - } - - if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){ - *(int32_t*)mv_cache[1][3]= *(int32_t*)mv_cache[1][2]; - *(int32_t*)mv_cache[2][3]= *(int32_t*)mv_cache[2][2]; - }else{ - *(int32_t*)mv_cache[1][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2]; - *(int32_t*)mv_cache[2][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2+mot_stride]; - } - - mx = 0; - my = 0; - for(i=0;i<4;i++) { - const int x= (i&1)+1; - const int y= (i>>1)+1; - int16_t mv[5][2]= { - {mv_cache[y][x ][0], mv_cache[y][x ][1]}, - {mv_cache[y-1][x][0], mv_cache[y-1][x][1]}, - {mv_cache[y][x-1][0], mv_cache[y][x-1][1]}, - {mv_cache[y][x+1][0], mv_cache[y][x+1][1]}, - {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}}; - //FIXME cleanup - obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize, - ref_picture[0], - mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8, - pix_op[1], - mv); - - mx += mv[0][0]; - my += mv[0][1]; - } - if(!(s->flags&CODEC_FLAG_GRAY)) - chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my); - - return; - } - switch(s->mv_type) { case MV_TYPE_16X16: -#ifdef CONFIG_RISKY - if(s->mcsel){ - if(s->real_sprite_warping_points==1){ - gmc1_motion(s, dest_y, dest_cb, dest_cr, 0, - ref_picture, 0); - }else{ - gmc_motion(s, dest_y, dest_cb, dest_cr, 0, - ref_picture, 0); - } - }else if(s->quarter_sample){ - qpel_motion(s, dest_y, dest_cb, dest_cr, 0, - ref_picture, 0, - 0, pix_op, qpix_op, - s->mv[dir][0][0], s->mv[dir][0][1], 16); - }else if(s->mspel){ - ff_mspel_motion(s, dest_y, dest_cb, dest_cr, - ref_picture, pix_op, - s->mv[dir][0][0], s->mv[dir][0][1], 16); - }else -#endif - { - mpeg_motion(s, dest_y, dest_cb, dest_cr, 0, - ref_picture, 0, - 0, pix_op, - s->mv[dir][0][0], s->mv[dir][0][1], 16); - } + mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, + 0, 0, 0, + ref_picture, pix_op, + s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s); break; case MV_TYPE_8X8: mx = 0; my = 0; - if(s->quarter_sample){ for(i=0;i<4;i++) { - motion_x = s->mv[dir][i][0]; - motion_y = s->mv[dir][i][1]; - - dxy = ((motion_y & 3) << 2) | (motion_x & 3); - src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8; - src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8; - - /* WARNING: do no forget half pels */ - src_x = clip(src_x, -16, s->width); - if (src_x == s->width) - dxy &= ~3; - src_y = clip(src_y, -16, s->height); - if (src_y == s->height) - dxy &= ~12; - - ptr = ref_picture[0] 
+ (src_y * s->linesize) + (src_x); - if(s->flags&CODEC_FLAG_EMU_EDGE){ - if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 8 - || (unsigned)src_y > s->v_edge_pos - (motion_y&3) - 8 ){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos); - ptr= s->edge_emu_buffer; - } - } - dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize; - qpix_op[1][dxy](dest, ptr, s->linesize); - - mx += s->mv[dir][i][0]/2; - my += s->mv[dir][i][1]/2; - } - }else{ - for(i=0;i<4;i++) { - hpel_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize, - ref_picture[0], - mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8, + hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s, + ref_picture[0], 0, 0, + (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s, s->width, s->height, s->linesize, - s->h_edge_pos, s->v_edge_pos, - 8, 8, pix_op[1], + s->h_edge_pos >> lowres, s->v_edge_pos >> lowres, + block_s, block_s, pix_op, s->mv[dir][i][0], s->mv[dir][i][1]); mx += s->mv[dir][i][0]; my += s->mv[dir][i][1]; } - } - if(!(s->flags&CODEC_FLAG_GRAY)) - chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my); + if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)) + chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my); break; case MV_TYPE_FIELD: if (s->picture_structure == PICT_FRAME) { - if(s->quarter_sample){ - /* top field */ - qpel_motion(s, dest_y, dest_cb, dest_cr, 0, - ref_picture, s->field_select[dir][0] ? s->linesize : 0, - 1, pix_op, qpix_op, - s->mv[dir][0][0], s->mv[dir][0][1], 8); - /* bottom field */ - qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize, - ref_picture, s->field_select[dir][1] ? s->linesize : 0, - 1, pix_op, qpix_op, - s->mv[dir][1][0], s->mv[dir][1][1], 8); - }else{ - /* top field */ - mpeg_motion(s, dest_y, dest_cb, dest_cr, 0, - ref_picture, s->field_select[dir][0] ? s->linesize : 0, - 1, pix_op, - s->mv[dir][0][0], s->mv[dir][0][1], 8); - /* bottom field */ - mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize, - ref_picture, s->field_select[dir][1] ? s->linesize : 0, - 1, pix_op, - s->mv[dir][1][0], s->mv[dir][1][1], 8); - } + /* top field */ + mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, + 1, 0, s->field_select[dir][0], + ref_picture, pix_op, + s->mv[dir][0][0], s->mv[dir][0][1], block_s); + /* bottom field */ + mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, + 1, 1, s->field_select[dir][1], + ref_picture, pix_op, + s->mv[dir][1][0], s->mv[dir][1][1], block_s); } else { - int offset; - if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){ - offset= s->field_select[dir][0] ? s->linesize : 0; - }else{ - ref_picture= s->current_picture.data; - offset= s->field_select[dir][0] ? 
s->linesize : -s->linesize; - } + if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){ + ref_picture= s->current_picture_ptr->data; + } - mpeg_motion(s, dest_y, dest_cb, dest_cr, 0, - ref_picture, offset, - 0, pix_op, - s->mv[dir][0][0], s->mv[dir][0][1], 16); + mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, + 0, 0, s->field_select[dir][0], + ref_picture, pix_op, + s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s); } break; - case MV_TYPE_16X8:{ - int offset; - uint8_t ** ref2picture; + case MV_TYPE_16X8: + for(i=0; i<2; i++){ + uint8_t ** ref2picture; - if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){ + if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){ ref2picture= ref_picture; - offset= s->field_select[dir][0] ? s->linesize : 0; }else{ - ref2picture= s->current_picture.data; - offset= s->field_select[dir][0] ? s->linesize : -s->linesize; - } + ref2picture= s->current_picture_ptr->data; + } - mpeg_motion(s, dest_y, dest_cb, dest_cr, 0, - ref2picture, offset, - 0, pix_op, - s->mv[dir][0][0], s->mv[dir][0][1], 8); + mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, + 0, 0, s->field_select[dir][i], + ref2picture, pix_op, + s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s); - - if(s->picture_structure == s->field_select[dir][1] + 1 || s->pict_type == B_TYPE || s->first_field){ - ref2picture= ref_picture; - offset= s->field_select[dir][1] ? s->linesize : 0; - }else{ - ref2picture= s->current_picture.data; - offset= s->field_select[dir][1] ? s->linesize : -s->linesize; - } - // I know it is ugly but this is the only way to fool emu_edge without rewrite mpeg_motion - mpeg_motion(s, dest_y+16*s->linesize, dest_cb+8*s->uvlinesize, dest_cr+8*s->uvlinesize, - 0, - ref2picture, offset, - 0, pix_op, - s->mv[dir][1][0], s->mv[dir][1][1]+16, 8); + dest_y += 2*block_s*s->linesize; + dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize; + dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize; } - break; case MV_TYPE_DMV: - { - op_pixels_func (*dmv_pix_op)[4]; - int offset; - - dmv_pix_op = s->dsp.put_pixels_tab; - if(s->picture_structure == PICT_FRAME){ - //put top field from top field - mpeg_motion(s, dest_y, dest_cb, dest_cr, 0, - ref_picture, 0, - 1, dmv_pix_op, - s->mv[dir][0][0], s->mv[dir][0][1], 8); - //put bottom field from bottom field - mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize, - ref_picture, s->linesize, - 1, dmv_pix_op, - s->mv[dir][0][0], s->mv[dir][0][1], 8); - - dmv_pix_op = s->dsp.avg_pixels_tab; - - //avg top field from bottom field - mpeg_motion(s, dest_y, dest_cb, dest_cr, 0, - ref_picture, s->linesize, - 1, dmv_pix_op, - s->mv[dir][2][0], s->mv[dir][2][1], 8); - //avg bottom field from top field - mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize, - ref_picture, 0, - 1, dmv_pix_op, - s->mv[dir][3][0], s->mv[dir][3][1], 8); - + for(i=0; i<2; i++){ + int j; + for(j=0; j<2; j++){ + mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, + 1, j, j^i, + ref_picture, pix_op, + s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s); + } + pix_op = s->dsp.avg_h264_chroma_pixels_tab; + } }else{ - offset=(s->picture_structure == PICT_BOTTOM_FIELD)? 
- s->linesize : 0; + for(i=0; i<2; i++){ + mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, + 0, 0, s->picture_structure != i+1, + ref_picture, pix_op, + s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s); - //put field from the same parity - //same parity is never in the same frame - mpeg_motion(s, dest_y, dest_cb, dest_cr, 0, - ref_picture,offset, - 0,dmv_pix_op, - s->mv[dir][0][0],s->mv[dir][0][1],16); + // after put we make avg of the same block + pix_op = s->dsp.avg_h264_chroma_pixels_tab; - // after put we make avg of the same block - dmv_pix_op=s->dsp.avg_pixels_tab; - - //opposite parity is always in the same frame if this is second field - if(!s->first_field){ - ref_picture = s->current_picture.data; - //top field is one linesize from frame beginig - offset=(s->picture_structure == PICT_BOTTOM_FIELD)? - -s->linesize : s->linesize; - }else - offset=(s->picture_structure == PICT_BOTTOM_FIELD)? - 0 : s->linesize; - - //avg field from the opposite parity - mpeg_motion(s, dest_y, dest_cb, dest_cr,0, - ref_picture, offset, - 0,dmv_pix_op, - s->mv[dir][2][0],s->mv[dir][2][1],16); + //opposite parity is always in the same frame if this is second field + if(!s->first_field){ + ref_picture = s->current_picture_ptr->data; + } + } } - } break; default: assert(0); } } - /* put block[] to dest[] */ -static inline void put_dct(MpegEncContext *s, +static inline void put_dct(MpegEncContext *s, DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale) { s->dct_unquantize_intra(s, block, i, qscale); @@ -2885,7 +1662,7 @@ static inline void put_dct(MpegEncContext *s, } /* add block[] to dest[] */ -static inline void add_dct(MpegEncContext *s, +static inline void add_dct(MpegEncContext *s, DCTELEM *block, int i, uint8_t *dest, int line_size) { if (s->block_last_index[i] >= 0) { @@ -2893,7 +1670,7 @@ static inline void add_dct(MpegEncContext *s, } } -static inline void add_dequant_dct(MpegEncContext *s, +static inline void add_dequant_dct(MpegEncContext *s, DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale) { if (s->block_last_index[i] >= 0) { @@ -2908,11 +1685,11 @@ static inline void add_dequant_dct(MpegEncContext *s, */ void ff_clean_intra_table_entries(MpegEncContext *s) { - int wrap = s->block_wrap[0]; + int wrap = s->b8_stride; int xy = s->block_index[0]; - - s->dc_val[0][xy ] = - s->dc_val[0][xy + 1 ] = + + s->dc_val[0][xy ] = + s->dc_val[0][xy + 1 ] = s->dc_val[0][xy + wrap] = s->dc_val[0][xy + 1 + wrap] = 1024; /* ac pred */ @@ -2925,15 +1702,15 @@ void ff_clean_intra_table_entries(MpegEncContext *s) s->coded_block[xy + 1 + wrap] = 0; } /* chroma */ - wrap = s->block_wrap[4]; - xy = s->mb_x + 1 + (s->mb_y + 1) * wrap; + wrap = s->mb_stride; + xy = s->mb_x + s->mb_y * wrap; s->dc_val[1][xy] = s->dc_val[2][xy] = 1024; /* ac pred */ memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t)); memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t)); - - s->mbintra_table[s->mb_x + s->mb_y*s->mb_stride]= 0; + + s->mbintra_table[xy]= 0; } /* generic function called after a macroblock has been parsed by the @@ -2946,7 +1723,9 @@ void ff_clean_intra_table_entries(MpegEncContext *s) s->mv : motion vector s->interlaced_dct : true if interlaced dct used (mpeg2) */ -void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) +static av_always_inline +void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], + int lowres_flag, int is_mpeg12) { int mb_x, mb_y; const int mb_xy = s->mb_y * s->mb_stride + s->mb_x; @@ -2960,11 +1739,20 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) 
mb_x = s->mb_x; mb_y = s->mb_y; + if(s->avctx->debug&FF_DEBUG_DCT_COEFF) { + /* save DCT coefficients */ + int i,j; + DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6]; + for(i=0; i<6; i++) + for(j=0; j<64; j++) + *dct++ = block[i][s->dsp.idct_permutation[j]]; + } + s->current_picture.qscale_table[mb_xy]= s->qscale; /* update DC predictors for P macroblocks */ if (!s->mb_intra) { - if (s->h263_pred || s->h263_aic) { + if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) { if(s->mbintra_table[mb_xy]) ff_clean_intra_table_entries(s); } else { @@ -2973,17 +1761,18 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) s->last_dc[2] = 128 << s->intra_dc_precision; } } - else if (s->h263_pred || s->h263_aic) + else if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) s->mbintra_table[mb_xy]=1; - if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc + if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc uint8_t *dest_y, *dest_cb, *dest_cr; int dct_linesize, dct_offset; op_pixels_func (*op_pix)[4]; qpel_mc_func (*op_qpix)[16]; - const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics + const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics const int uvlinesize= s->current_picture.linesize[1]; - const int readable= s->pict_type != B_TYPE || s->encoding || s->avctx->draw_horiz_band; + const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag; + const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8; /* avoid copy if macroblock skipped in last frame too */ /* skip only during decoding as we might trash the buffers during encoding a bit */ @@ -2993,11 +1782,11 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) assert(age); - if (s->mb_skiped) { - s->mb_skiped= 0; - assert(s->pict_type!=I_TYPE); - - (*mbskip_ptr) ++; /* indicate that this time we skiped it */ + if (s->mb_skipped) { + s->mb_skipped= 0; + assert(s->pict_type!=FF_I_TYPE); + + (*mbskip_ptr) ++; /* indicate that this time we skipped it */ if(*mbskip_ptr >99) *mbskip_ptr= 99; /* if previous was skipped too, then nothing to do ! */ @@ -3012,191 +1801,182 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) } } - if (s->interlaced_dct) { - dct_linesize = linesize * 2; - dct_offset = linesize; - } else { - dct_linesize = linesize; - dct_offset = linesize * 8; - } + dct_linesize = linesize << s->interlaced_dct; + dct_offset =(s->interlaced_dct)? 
linesize : linesize*block_size; + if(readable){ dest_y= s->dest[0]; dest_cb= s->dest[1]; dest_cr= s->dest[2]; }else{ - dest_y = s->edge_emu_buffer+32; //FIXME cleanup scratchpad pointers - dest_cb= s->edge_emu_buffer+48; - dest_cr= s->edge_emu_buffer+56; + dest_y = s->b_scratchpad; + dest_cb= s->b_scratchpad+16*linesize; + dest_cr= s->b_scratchpad+32*linesize; } + if (!s->mb_intra) { /* motion handling */ - /* decoding or more than one mb_type (MC was allready done otherwise) */ + /* decoding or more than one mb_type (MC was already done otherwise) */ if(!s->encoding){ - if ((!s->no_rounding) || s->pict_type==B_TYPE){ - op_pix = s->dsp.put_pixels_tab; - op_qpix= s->dsp.put_qpel_pixels_tab; - }else{ - op_pix = s->dsp.put_no_rnd_pixels_tab; - op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab; - } + if(lowres_flag){ + h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab; - if (s->mv_dir & MV_DIR_FORWARD) { - MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix); - op_pix = s->dsp.avg_pixels_tab; - op_qpix= s->dsp.avg_qpel_pixels_tab; - } - if (s->mv_dir & MV_DIR_BACKWARD) { - MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix); + if (s->mv_dir & MV_DIR_FORWARD) { + MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix); + op_pix = s->dsp.avg_h264_chroma_pixels_tab; + } + if (s->mv_dir & MV_DIR_BACKWARD) { + MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix); + } + }else{ + op_qpix= s->me.qpel_put; + if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){ + op_pix = s->dsp.put_pixels_tab; + }else{ + op_pix = s->dsp.put_no_rnd_pixels_tab; + } + if (s->mv_dir & MV_DIR_FORWARD) { + MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix); + op_pix = s->dsp.avg_pixels_tab; + op_qpix= s->me.qpel_avg; + } + if (s->mv_dir & MV_DIR_BACKWARD) { + MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix); + } } } /* skip dequant / idct if we are really late ;) */ - if(s->hurry_up>1) return; + if(s->hurry_up>1) goto skip_idct; + if(s->avctx->skip_idct){ + if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE) + ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE) + || s->avctx->skip_idct >= AVDISCARD_ALL) + goto skip_idct; + } /* add dct residue */ if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){ - add_dequant_dct(s, block[0], 0, dest_y, dct_linesize, s->qscale); - add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize, s->qscale); - add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize, s->qscale); - add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize, s->qscale); + add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale); + add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale); + add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale); + add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale); - if(!(s->flags&CODEC_FLAG_GRAY)){ - add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); - add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); + if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ + if (s->chroma_y_shift){ + add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); + add_dequant_dct(s, block[5], 5, dest_cr, 
uvlinesize, s->chroma_qscale); + }else{ + dct_linesize >>= 1; + dct_offset >>=1; + add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale); + add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale); + add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale); + add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale); + } } - } else if(s->codec_id != CODEC_ID_WMV2){ - add_dct(s, block[0], 0, dest_y, dct_linesize); - add_dct(s, block[1], 1, dest_y + 8, dct_linesize); - add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize); - add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize); + } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){ + add_dct(s, block[0], 0, dest_y , dct_linesize); + add_dct(s, block[1], 1, dest_y + block_size, dct_linesize); + add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize); + add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize); - if(!(s->flags&CODEC_FLAG_GRAY)){ - add_dct(s, block[4], 4, dest_cb, uvlinesize); - add_dct(s, block[5], 5, dest_cr, uvlinesize); - } - } -#ifdef CONFIG_RISKY - else{ + if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ + if(s->chroma_y_shift){//Chroma420 + add_dct(s, block[4], 4, dest_cb, uvlinesize); + add_dct(s, block[5], 5, dest_cr, uvlinesize); + }else{ + //chroma422 + dct_linesize = uvlinesize << s->interlaced_dct; + dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8; + + add_dct(s, block[4], 4, dest_cb, dct_linesize); + add_dct(s, block[5], 5, dest_cr, dct_linesize); + add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize); + add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize); + if(!s->chroma_x_shift){//Chroma444 + add_dct(s, block[8], 8, dest_cb+8, dct_linesize); + add_dct(s, block[9], 9, dest_cr+8, dct_linesize); + add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize); + add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize); + } + } + }//fi gray + } + else if (ENABLE_WMV2) { ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr); } -#endif } else { /* dct only in intra block */ if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){ - put_dct(s, block[0], 0, dest_y, dct_linesize, s->qscale); - put_dct(s, block[1], 1, dest_y + 8, dct_linesize, s->qscale); - put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize, s->qscale); - put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize, s->qscale); + put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale); + put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale); + put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale); + put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale); - if(!(s->flags&CODEC_FLAG_GRAY)){ - put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); - put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); + if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ + if(s->chroma_y_shift){ + put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); + put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); + }else{ + dct_offset >>=1; + dct_linesize >>=1; + put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale); + put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale); + put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale); + put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale); + } } }else{ - s->dsp.idct_put(dest_y , 
dct_linesize, block[0]); - s->dsp.idct_put(dest_y + 8, dct_linesize, block[1]); - s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]); - s->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]); + s->dsp.idct_put(dest_y , dct_linesize, block[0]); + s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]); + s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]); + s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]); - if(!(s->flags&CODEC_FLAG_GRAY)){ - s->dsp.idct_put(dest_cb, uvlinesize, block[4]); - s->dsp.idct_put(dest_cr, uvlinesize, block[5]); - } + if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ + if(s->chroma_y_shift){ + s->dsp.idct_put(dest_cb, uvlinesize, block[4]); + s->dsp.idct_put(dest_cr, uvlinesize, block[5]); + }else{ + + dct_linesize = uvlinesize << s->interlaced_dct; + dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8; + + s->dsp.idct_put(dest_cb, dct_linesize, block[4]); + s->dsp.idct_put(dest_cr, dct_linesize, block[5]); + s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]); + s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]); + if(!s->chroma_x_shift){//Chroma444 + s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]); + s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]); + s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]); + s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]); + } + } + }//gray } } +skip_idct: if(!readable){ s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16); - s->dsp.put_pixels_tab[1][0](s->dest[1], dest_cb, uvlinesize, 8); - s->dsp.put_pixels_tab[1][0](s->dest[2], dest_cr, uvlinesize, 8); + s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift); + s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift); } } } -#ifdef CONFIG_ENCODERS - -static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold) -{ - static const char tab[64]= - {3,2,2,1,1,1,1,1, - 1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1, - 0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0}; - int score=0; - int run=0; - int i; - DCTELEM *block= s->block[n]; - const int last_index= s->block_last_index[n]; - int skip_dc; - - if(threshold<0){ - skip_dc=0; - threshold= -threshold; - }else - skip_dc=1; - - /* are all which we could set to zero are allready zero? 
*/ - if(last_index<=skip_dc - 1) return; - - for(i=0; i<=last_index; i++){ - const int j = s->intra_scantable.permutated[i]; - const int level = ABS(block[j]); - if(level==1){ - if(skip_dc && i==0) continue; - score+= tab[run]; - run=0; - }else if(level>1){ - return; - }else{ - run++; - } - } - if(score >= threshold) return; - for(i=skip_dc; i<=last_index; i++){ - const int j = s->intra_scantable.permutated[i]; - block[j]=0; - } - if(block[0]) s->block_last_index[n]= 0; - else s->block_last_index[n]= -1; +void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){ +#ifndef CONFIG_SMALL + if(s->out_format == FMT_MPEG1) { + if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1); + else MPV_decode_mb_internal(s, block, 0, 1); + } else +#endif + if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0); + else MPV_decode_mb_internal(s, block, 0, 0); } -static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index) -{ - int i; - const int maxlevel= s->max_qcoeff; - const int minlevel= s->min_qcoeff; - int overflow=0; - - if(s->mb_intra){ - i=1; //skip clipping of intra dc - }else - i=0; - - for(;i<=last_index; i++){ - const int j= s->intra_scantable.permutated[i]; - int level = block[j]; - - if (level>maxlevel){ - level=maxlevel; - overflow++; - }else if(levelavctx->mb_decision == FF_MB_DECISION_SIMPLE) - av_log(s->avctx, AV_LOG_INFO, "warning, cliping %d dct coefficents to %d..%d\n", overflow, minlevel, maxlevel); -} - -#endif //CONFIG_ENCODERS - /** * * @param h is the normal height, this will be reduced automatically if needed for the last row @@ -3205,31 +1985,31 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){ if (s->avctx->draw_horiz_band) { AVFrame *src; int offset[4]; - + if(s->picture_structure != PICT_FRAME){ h <<= 1; y <<= 1; if(s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return; } - h= FFMIN(h, s->height - y); + h= FFMIN(h, s->avctx->height - y); - if(s->pict_type==B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER)) + if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER)) src= (AVFrame*)s->current_picture_ptr; else if(s->last_picture_ptr) src= (AVFrame*)s->last_picture_ptr; else return; - - if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){ + + if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){ offset[0]= offset[1]= offset[2]= offset[3]= 0; }else{ - offset[0]= y * s->linesize;; - offset[1]= - offset[2]= (y>>1) * s->uvlinesize;; + offset[0]= y * s->linesize; + offset[1]= + offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize; offset[3]= 0; } @@ -3241,1767 +2021,65 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){ } void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename - const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics + const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics const int uvlinesize= s->current_picture.linesize[1]; - - s->block_index[0]= s->block_wrap[0]*(s->mb_y*2 + 1) - 1 + s->mb_x*2; - s->block_index[1]= s->block_wrap[0]*(s->mb_y*2 + 1) + s->mb_x*2; - s->block_index[2]= s->block_wrap[0]*(s->mb_y*2 + 2) - 1 + s->mb_x*2; - s->block_index[3]= s->block_wrap[0]*(s->mb_y*2 + 2) + s->mb_x*2; - s->block_index[4]= s->block_wrap[4]*(s->mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x; - s->block_index[5]= 
s->block_wrap[4]*(s->mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x; - - if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){ - s->dest[0] = s->current_picture.data[0] + s->mb_x * 16 - 16; - s->dest[1] = s->current_picture.data[1] + s->mb_x * 8 - 8; - s->dest[2] = s->current_picture.data[2] + s->mb_x * 8 - 8; - }else{ - s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* linesize ) + s->mb_x * 16 - 16; - s->dest[1] = s->current_picture.data[1] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8; - s->dest[2] = s->current_picture.data[2] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8; - } -} + const int mb_size= 4 - s->avctx->lowres; -#ifdef CONFIG_ENCODERS + s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2; + s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2; + s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2; + s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2; + s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1; + s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1; + //block_index is not used by mpeg2, so it is not affected by chroma_format -static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) -{ - const int mb_x= s->mb_x; - const int mb_y= s->mb_y; - int i; - int skip_dct[6]; - int dct_offset = s->linesize*8; //default for progressive frames - - for(i=0; i<6; i++) skip_dct[i]=0; - - if(s->adaptive_quant){ - const int last_qp= s->qscale; - const int mb_xy= mb_x + mb_y*s->mb_stride; + s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size); + s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift)); + s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift)); - s->lambda= s->lambda_table[mb_xy]; - update_qscale(s); - - if(!(s->flags&CODEC_FLAG_QP_RD)){ - s->dquant= s->qscale - last_qp; - - if(s->out_format==FMT_H263) - s->dquant= clip(s->dquant, -2, 2); //FIXME RD - - if(s->codec_id==CODEC_ID_MPEG4){ - if(!s->mb_intra){ - if((s->mv_dir&MV_DIRECT) || s->mv_type==MV_TYPE_8X8) - s->dquant=0; - } - } - } - ff_set_qscale(s, last_qp + s->dquant); + if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME)) + { + s->dest[0] += s->mb_y * linesize << mb_size; + s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift); + s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift); } - - if (s->mb_intra) { - uint8_t *ptr; - int wrap_y; - int emu=0; - - wrap_y = s->linesize; - ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16; - - if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height); - ptr= s->edge_emu_buffer; - emu=1; - } - - if(s->flags&CODEC_FLAG_INTERLACED_DCT){ - int progressive_score, interlaced_score; - - s->interlaced_dct=0; - progressive_score= s->dsp.ildct_cmp[4](s, ptr , NULL, wrap_y, 8) - +s->dsp.ildct_cmp[4](s, ptr + wrap_y*8, NULL, wrap_y, 8) - 400; - - if(progressive_score > 0){ - interlaced_score = s->dsp.ildct_cmp[4](s, ptr , NULL, wrap_y*2, 8) - +s->dsp.ildct_cmp[4](s, ptr + wrap_y , NULL, wrap_y*2, 8); - if(progressive_score > interlaced_score){ - s->interlaced_dct=1; - - dct_offset= wrap_y; - wrap_y<<=1; - } - } - } - - s->dsp.get_pixels(s->block[0], ptr , wrap_y); - s->dsp.get_pixels(s->block[1], ptr + 8, 
wrap_y); - s->dsp.get_pixels(s->block[2], ptr + dct_offset , wrap_y); - s->dsp.get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y); - - if(s->flags&CODEC_FLAG_GRAY){ - skip_dct[4]= 1; - skip_dct[5]= 1; - }else{ - int wrap_c = s->uvlinesize; - ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8; - if(emu){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); - ptr= s->edge_emu_buffer; - } - s->dsp.get_pixels(s->block[4], ptr, wrap_c); - - ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8; - if(emu){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); - ptr= s->edge_emu_buffer; - } - s->dsp.get_pixels(s->block[5], ptr, wrap_c); - } - }else{ - op_pixels_func (*op_pix)[4]; - qpel_mc_func (*op_qpix)[16]; - uint8_t *dest_y, *dest_cb, *dest_cr; - uint8_t *ptr_y, *ptr_cb, *ptr_cr; - int wrap_y, wrap_c; - int emu=0; - - dest_y = s->dest[0]; - dest_cb = s->dest[1]; - dest_cr = s->dest[2]; - wrap_y = s->linesize; - wrap_c = s->uvlinesize; - ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16; - ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8; - ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8; - - if ((!s->no_rounding) || s->pict_type==B_TYPE){ - op_pix = s->dsp.put_pixels_tab; - op_qpix= s->dsp.put_qpel_pixels_tab; - }else{ - op_pix = s->dsp.put_no_rnd_pixels_tab; - op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab; - } - - if (s->mv_dir & MV_DIR_FORWARD) { - MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix); - op_pix = s->dsp.avg_pixels_tab; - op_qpix= s->dsp.avg_qpel_pixels_tab; - } - if (s->mv_dir & MV_DIR_BACKWARD) { - MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix); - } - - if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height); - ptr_y= s->edge_emu_buffer; - emu=1; - } - - if(s->flags&CODEC_FLAG_INTERLACED_DCT){ - int progressive_score, interlaced_score; - - s->interlaced_dct=0; - progressive_score= s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y, 8) - +s->dsp.ildct_cmp[0](s, dest_y + wrap_y*8, ptr_y + wrap_y*8, wrap_y, 8) - 400; - - if(s->avctx->ildct_cmp == FF_CMP_VSSE) progressive_score -= 400; - - if(progressive_score>0){ - interlaced_score = s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y*2, 8) - +s->dsp.ildct_cmp[0](s, dest_y + wrap_y , ptr_y + wrap_y , wrap_y*2, 8); - - if(progressive_score > interlaced_score){ - s->interlaced_dct=1; - - dct_offset= wrap_y; - wrap_y<<=1; - } - } - } - - s->dsp.diff_pixels(s->block[0], ptr_y , dest_y , wrap_y); - s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y); - s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y); - s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y); - - if(s->flags&CODEC_FLAG_GRAY){ - skip_dct[4]= 1; - skip_dct[5]= 1; - }else{ - if(emu){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); - ptr_cb= s->edge_emu_buffer; - } - s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c); - if(emu){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); - ptr_cr= s->edge_emu_buffer; - } - s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c); - } - /* pre quantization */ - if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ 
mb_x]<2*s->qscale*s->qscale){ - //FIXME optimize - if(s->dsp.sad[1](NULL, ptr_y , dest_y , wrap_y, 8) < 20*s->qscale) skip_dct[0]= 1; - if(s->dsp.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20*s->qscale) skip_dct[1]= 1; - if(s->dsp.sad[1](NULL, ptr_y +dct_offset , dest_y +dct_offset , wrap_y, 8) < 20*s->qscale) skip_dct[2]= 1; - if(s->dsp.sad[1](NULL, ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y, 8) < 20*s->qscale) skip_dct[3]= 1; - if(s->dsp.sad[1](NULL, ptr_cb , dest_cb , wrap_c, 8) < 20*s->qscale) skip_dct[4]= 1; - if(s->dsp.sad[1](NULL, ptr_cr , dest_cr , wrap_c, 8) < 20*s->qscale) skip_dct[5]= 1; -#if 0 -{ - static int stat[7]; - int num=0; - for(i=0; i<6; i++) - if(skip_dct[i]) num++; - stat[num]++; - - if(s->mb_x==0 && s->mb_y==0){ - for(i=0; i<7; i++){ - printf("%6d %1d\n", stat[i], i); - } - } -} -#endif - } - - } - - /* DCT & quantize */ - if(s->out_format==FMT_MJPEG){ - for(i=0;i<6;i++) { - int overflow; - s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow); - if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]); - } - }else{ - for(i=0;i<6;i++) { - if(!skip_dct[i]){ - int overflow; - s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow); - // FIXME we could decide to change to quantizer instead of clipping - // JS: I don't think that would be a good idea it could lower quality instead - // of improve it. Just INTRADC clipping deserves changes in quantizer - if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]); - }else - s->block_last_index[i]= -1; - } - - if(s->luma_elim_threshold && !s->mb_intra) - for(i=0; i<4; i++) - dct_single_coeff_elimination(s, i, s->luma_elim_threshold); - if(s->chroma_elim_threshold && !s->mb_intra) - for(i=4; i<6; i++) - dct_single_coeff_elimination(s, i, s->chroma_elim_threshold); - - if(s->flags & CODEC_FLAG_CBP_RD){ - for(i=0;i<6;i++) { - if(s->block_last_index[i] == -1) - s->coded_score[i]= INT_MAX/256; - } - } - } - - if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){ - s->block_last_index[4]= - s->block_last_index[5]= 0; - s->block[4][0]= - s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale; - } - - //non c quantize code returns incorrect block_last_index FIXME - if(s->alternate_scan && s->dct_quantize != dct_quantize_c){ - for(i=0; i<6; i++){ - int j; - if(s->block_last_index[i]>0){ - for(j=63; j>0; j--){ - if(s->block[i][ s->intra_scantable.permutated[j] ]) break; - } - s->block_last_index[i]= j; - } - } - } - - /* huffman encode */ - switch(s->codec_id){ //FIXME funct ptr could be slightly faster - case CODEC_ID_MPEG1VIDEO: - case CODEC_ID_MPEG2VIDEO: - mpeg1_encode_mb(s, s->block, motion_x, motion_y); break; -#ifdef CONFIG_RISKY - case CODEC_ID_MPEG4: - mpeg4_encode_mb(s, s->block, motion_x, motion_y); break; - case CODEC_ID_MSMPEG4V2: - case CODEC_ID_MSMPEG4V3: - case CODEC_ID_WMV1: - msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break; - case CODEC_ID_WMV2: - ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break; - case CODEC_ID_H263: - case CODEC_ID_H263P: - case CODEC_ID_FLV1: - case CODEC_ID_RV10: - h263_encode_mb(s, s->block, motion_x, motion_y); break; -#endif - case CODEC_ID_MJPEG: - mjpeg_encode_mb(s, s->block); break; - default: - assert(0); - } -} - -#endif //CONFIG_ENCODERS - -/** - * combines the (truncated) bitstream to a complete frame - * @returns -1 if no complete frame could be created - */ -int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size){ - ParseContext *pc= &s->parse_context; - -#if 0 - 
if(pc->overread){ - printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index); - printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]); - } -#endif - - /* copy overreaded byes from last frame into buffer */ - for(; pc->overread>0; pc->overread--){ - pc->buffer[pc->index++]= pc->buffer[pc->overread_index++]; - } - - pc->last_index= pc->index; - - /* copy into buffer end return */ - if(next == END_NOT_FOUND){ - pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE); - - memcpy(&pc->buffer[pc->index], *buf, *buf_size); - pc->index += *buf_size; - return -1; - } - - *buf_size= - pc->overread_index= pc->index + next; - - /* append to buffer */ - if(pc->index){ - pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE); - - memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE ); - pc->index = 0; - *buf= pc->buffer; - } - - /* store overread bytes */ - for(;next < 0; next++){ - pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next]; - pc->overread++; - } - -#if 0 - if(pc->overread){ - printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index); - printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]); - } -#endif - - return 0; } void ff_mpeg_flush(AVCodecContext *avctx){ int i; MpegEncContext *s = avctx->priv_data; - - if(s==NULL || s->picture==NULL) + + if(s==NULL || s->picture==NULL) return; - + for(i=0; ipicture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL || s->picture[i].type == FF_BUFFER_TYPE_USER)) avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]); } s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL; - + + s->mb_x= s->mb_y= 0; + s->parse_context.state= -1; s->parse_context.frame_start_found= 0; s->parse_context.overread= 0; s->parse_context.overread_index= 0; s->parse_context.index= 0; s->parse_context.last_index= 0; + s->bitstream_buffer_size=0; + s->pp_time=0; } -#ifdef CONFIG_ENCODERS -void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length) -{ - int bytes= length>>4; - int bits= length&15; - int i; - - if(length==0) return; - - for(i=0; i>(16-bits)); -} - -static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){ - int i; - - memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop? - - /* mpeg1 */ - d->mb_skip_run= s->mb_skip_run; - for(i=0; i<3; i++) - d->last_dc[i]= s->last_dc[i]; - - /* statistics */ - d->mv_bits= s->mv_bits; - d->i_tex_bits= s->i_tex_bits; - d->p_tex_bits= s->p_tex_bits; - d->i_count= s->i_count; - d->f_count= s->f_count; - d->b_count= s->b_count; - d->skip_count= s->skip_count; - d->misc_bits= s->misc_bits; - d->last_bits= 0; - - d->mb_skiped= 0; - d->qscale= s->qscale; - d->dquant= s->dquant; -} - -static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){ - int i; - - memcpy(d->mv, s->mv, 2*4*2*sizeof(int)); - memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop? 
- - /* mpeg1 */ - d->mb_skip_run= s->mb_skip_run; - for(i=0; i<3; i++) - d->last_dc[i]= s->last_dc[i]; - - /* statistics */ - d->mv_bits= s->mv_bits; - d->i_tex_bits= s->i_tex_bits; - d->p_tex_bits= s->p_tex_bits; - d->i_count= s->i_count; - d->f_count= s->f_count; - d->b_count= s->b_count; - d->skip_count= s->skip_count; - d->misc_bits= s->misc_bits; - - d->mb_intra= s->mb_intra; - d->mb_skiped= s->mb_skiped; - d->mv_type= s->mv_type; - d->mv_dir= s->mv_dir; - d->pb= s->pb; - if(s->data_partitioning){ - d->pb2= s->pb2; - d->tex_pb= s->tex_pb; - } - d->block= s->block; - for(i=0; i<6; i++) - d->block_last_index[i]= s->block_last_index[i]; - d->interlaced_dct= s->interlaced_dct; - d->qscale= s->qscale; -} - -static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, - PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], - int *dmin, int *next_block, int motion_x, int motion_y) -{ - int score; - uint8_t *dest_backup[3]; - - copy_context_before_encode(s, backup, type); - - s->block= s->blocks[*next_block]; - s->pb= pb[*next_block]; - if(s->data_partitioning){ - s->pb2 = pb2 [*next_block]; - s->tex_pb= tex_pb[*next_block]; - } - - if(*next_block){ - memcpy(dest_backup, s->dest, sizeof(s->dest)); - s->dest[0] = s->me.scratchpad; - s->dest[1] = s->me.scratchpad + 16; - s->dest[2] = s->me.scratchpad + 16 + 8; - assert(2*s->uvlinesize == s->linesize); //should be no prob for encoding - assert(s->linesize >= 64); //FIXME - } - - encode_mb(s, motion_x, motion_y); - - score= get_bit_count(&s->pb); - if(s->data_partitioning){ - score+= get_bit_count(&s->pb2); - score+= get_bit_count(&s->tex_pb); - } - - if(s->avctx->mb_decision == FF_MB_DECISION_RD){ - MPV_decode_mb(s, s->block); - - score *= s->lambda2; - score += sse_mb(s) << FF_LAMBDA_SHIFT; - } - - if(*next_block){ - memcpy(s->dest, dest_backup, sizeof(s->dest)); - } - - if(score<*dmin){ - *dmin= score; - *next_block^=1; - - copy_context_after_encode(best, s, type); - } -} - -static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){ - uint32_t *sq = squareTbl + 256; - int acc=0; - int x,y; - - if(w==16 && h==16) - return s->dsp.sse[0](NULL, src1, src2, stride, 16); - else if(w==8 && h==8) - return s->dsp.sse[1](NULL, src1, src2, stride, 8); - - for(y=0; y=0); - - return acc; -} - -static int sse_mb(MpegEncContext *s){ - int w= 16; - int h= 16; - - if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16; - if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16; - - if(w==16 && h==16) - return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16) - +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8) - +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8); - else - return sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize) - +sse(s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize) - +sse(s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize); -} - -static void encode_picture(MpegEncContext *s, int picture_number) -{ - int mb_x, mb_y, pdif = 0; - int i, j; - int bits; - MpegEncContext best_s, backup_s; - uint8_t bit_buf[2][3000]; - uint8_t bit_buf2[2][3000]; - uint8_t bit_buf_tex[2][3000]; - PutBitContext pb[2], pb2[2], tex_pb[2]; - - 
for(i=0; i<2; i++){ - init_put_bits(&pb [i], bit_buf [i], 3000); - init_put_bits(&pb2 [i], bit_buf2 [i], 3000); - init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000); - } - - s->picture_number = picture_number; - - /* Reset the average MB variance */ - s->current_picture.mb_var_sum = 0; - s->current_picture.mc_mb_var_sum = 0; - -#ifdef CONFIG_RISKY - /* we need to initialize some time vars before we can encode b-frames */ - // RAL: Condition added for MPEG1VIDEO - if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4)) - ff_set_mpeg4_time(s, s->picture_number); -#endif - - s->scene_change_score=0; - - s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME ratedistoration - - if(s->pict_type==I_TYPE){ - if(s->msmpeg4_version >= 3) s->no_rounding=1; - else s->no_rounding=0; - }else if(s->pict_type!=B_TYPE){ - if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4) - s->no_rounding ^= 1; - } - - /* Estimate motion for every MB */ - s->mb_intra=0; //for the rate distoration & bit compare functions - if(s->pict_type != I_TYPE){ - if(s->pict_type != B_TYPE){ - if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){ - s->me.pre_pass=1; - s->me.dia_size= s->avctx->pre_dia_size; - - for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) { - s->mb_y = mb_y; - for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) { - s->mb_x = mb_x; - ff_pre_estimate_p_frame_motion(s, mb_x, mb_y); - } - } - s->me.pre_pass=0; - } - } - - s->me.dia_size= s->avctx->dia_size; - for(mb_y=0; mb_y < s->mb_height; mb_y++) { - s->mb_y = mb_y; - s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1; - s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1); - s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1; - s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2); - for(mb_x=0; mb_x < s->mb_width; mb_x++) { - s->mb_x = mb_x; - s->block_index[0]+=2; - s->block_index[1]+=2; - s->block_index[2]+=2; - s->block_index[3]+=2; - - /* compute motion vector & mb_type and store in context */ - if(s->pict_type==B_TYPE) - ff_estimate_b_frame_motion(s, mb_x, mb_y); - else - ff_estimate_p_frame_motion(s, mb_x, mb_y); - } - } - }else /* if(s->pict_type == I_TYPE) */{ - /* I-Frame */ - for(i=0; imb_stride*s->mb_height; i++) - s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA; - - if(!s->fixed_qscale){ - /* finding spatial complexity for I-frame rate control */ - for(mb_y=0; mb_y < s->mb_height; mb_y++) { - for(mb_x=0; mb_x < s->mb_width; mb_x++) { - int xx = mb_x * 16; - int yy = mb_y * 16; - uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx; - int varc; - int sum = s->dsp.pix_sum(pix, s->linesize); - - varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8; - - s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc; - s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8; - s->current_picture.mb_var_sum += varc; - } - } - } - } - emms_c(); - - if(s->scene_change_score > s->avctx->scenechange_threshold && s->pict_type == P_TYPE){ - s->pict_type= I_TYPE; - for(i=0; imb_stride*s->mb_height; i++) - s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA; -//printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum); - } - - if(!s->umvplus){ - if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) { - s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER); - - if(s->flags & CODEC_FLAG_INTERLACED_ME){ - int a,b; - a= 
ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select - b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I); - s->f_code= FFMAX(s->f_code, FFMAX(a,b)); - } - - ff_fix_long_p_mvs(s); - ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0); - if(s->flags & CODEC_FLAG_INTERLACED_ME){ - for(i=0; i<2; i++){ - for(j=0; j<2; j++) - ff_fix_long_mvs(s, s->p_field_select_table[i], j, - s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0); - } - } - } - - if(s->pict_type==B_TYPE){ - int a, b; - - a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD); - b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR); - s->f_code = FFMAX(a, b); - - a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD); - b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR); - s->b_code = FFMAX(a, b); - - ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1); - ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1); - ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1); - ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1); - if(s->flags & CODEC_FLAG_INTERLACED_ME){ - int dir; - for(dir=0; dir<2; dir++){ - for(i=0; i<2; i++){ - for(j=0; j<2; j++){ - int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I) - : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I); - ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j, - s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1); - } - } - } - } - } - } - - if (!s->fixed_qscale) - s->current_picture.quality = ff_rate_estimate_qscale(s); - - if(s->adaptive_quant){ -#ifdef CONFIG_RISKY - switch(s->codec_id){ - case CODEC_ID_MPEG4: - ff_clean_mpeg4_qscales(s); - break; - case CODEC_ID_H263: - case CODEC_ID_H263P: - case CODEC_ID_FLV1: - ff_clean_h263_qscales(s); - break; - } -#endif - - s->lambda= s->lambda_table[0]; - //FIXME broken - }else - s->lambda= s->current_picture.quality; -//printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality); - update_qscale(s); - - if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE && !(s->flags & CODEC_FLAG_QSCALE)) - s->qscale= 3; //reduce cliping problems - - if (s->out_format == FMT_MJPEG) { - /* for mjpeg, we do include qscale in the matrix */ - s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0]; - for(i=1;i<64;i++){ - int j= s->dsp.idct_permutation[i]; - - s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3); - } - convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16, - s->intra_matrix, s->intra_quant_bias, 8, 8); - } - - //FIXME var duplication - s->current_picture.key_frame= s->pict_type == I_TYPE; - s->current_picture.pict_type= s->pict_type; - - if(s->current_picture.key_frame) - s->picture_in_gop_number=0; - - s->last_bits= get_bit_count(&s->pb); - switch(s->out_format) { - case FMT_MJPEG: - mjpeg_picture_header(s); - break; -#ifdef CONFIG_RISKY - case FMT_H263: - if (s->codec_id == CODEC_ID_WMV2) - ff_wmv2_encode_picture_header(s, picture_number); - else if (s->h263_msmpeg4) - msmpeg4_encode_picture_header(s, picture_number); - else if (s->h263_pred) - mpeg4_encode_picture_header(s, picture_number); - else if (s->codec_id == CODEC_ID_RV10) - rv10_encode_picture_header(s, picture_number); - else 
if (s->codec_id == CODEC_ID_FLV1) - ff_flv_encode_picture_header(s, picture_number); - else - h263_encode_picture_header(s, picture_number); - break; -#endif - case FMT_MPEG1: - mpeg1_encode_picture_header(s, picture_number); - break; - case FMT_H264: - break; - default: - assert(0); - } - bits= get_bit_count(&s->pb); - s->header_bits= bits - s->last_bits; - s->last_bits= bits; - s->mv_bits=0; - s->misc_bits=0; - s->i_tex_bits=0; - s->p_tex_bits=0; - s->i_count=0; - s->f_count=0; - s->b_count=0; - s->skip_count=0; - - for(i=0; i<3; i++){ - /* init last dc values */ - /* note: quant matrix value (8) is implied here */ - s->last_dc[i] = 128; - - s->current_picture_ptr->error[i] = 0; - } - s->mb_skip_run = 0; - memset(s->last_mv, 0, sizeof(s->last_mv)); - - s->last_mv_dir = 0; - -#ifdef CONFIG_RISKY - switch(s->codec_id){ - case CODEC_ID_H263: - case CODEC_ID_H263P: - case CODEC_ID_FLV1: - s->gob_index = ff_h263_get_gob_height(s); - break; - case CODEC_ID_MPEG4: - if(s->partitioned_frame) - ff_mpeg4_init_partitions(s); - break; - } -#endif - - s->resync_mb_x=0; - s->resync_mb_y=0; - s->first_slice_line = 1; - s->ptr_lastgob = s->pb.buf; - for(mb_y=0; mb_y < s->mb_height; mb_y++) { - s->mb_x=0; - s->mb_y= mb_y; - - ff_set_qscale(s, s->qscale); - ff_init_block_index(s); - - for(mb_x=0; mb_x < s->mb_width; mb_x++) { - const int xy= mb_y*s->mb_stride + mb_x; - int mb_type= s->mb_type[xy]; -// int d; - int dmin= INT_MAX; - int dir; - - s->mb_x = mb_x; - ff_update_block_index(s); - - /* write gob / video packet header */ -#ifdef CONFIG_RISKY - if(s->rtp_mode){ - int current_packet_size, is_gob_start; - - current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob; - - is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0; - - switch(s->codec_id){ - case CODEC_ID_H263: - case CODEC_ID_H263P: - if(!s->h263_slice_structured) - if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0; - break; - case CODEC_ID_MPEG2VIDEO: - if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1; - case CODEC_ID_MPEG1VIDEO: - if(s->mb_skip_run) is_gob_start=0; - break; - } - - if(is_gob_start){ - if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){ - ff_mpeg4_merge_partitions(s); - ff_mpeg4_init_partitions(s); - } - - if(s->codec_id==CODEC_ID_MPEG4) - ff_mpeg4_stuffing(&s->pb); - - align_put_bits(&s->pb); - flush_put_bits(&s->pb); - - assert((get_bit_count(&s->pb)&7) == 0); - current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob; - - if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){ - int r= get_bit_count(&s->pb)/8 + s->picture_number + s->codec_id + s->mb_x + s->mb_y; - int d= 100 / s->avctx->error_rate; - if(r % d == 0){ - current_packet_size=0; -#ifndef ALT_BITSTREAM_WRITER - s->pb.buf_ptr= s->ptr_lastgob; -#endif - assert(pbBufPtr(&s->pb) == s->ptr_lastgob); - } - } - - if (s->avctx->rtp_callback) - s->avctx->rtp_callback(s->ptr_lastgob, current_packet_size, 0); - - switch(s->codec_id){ - case CODEC_ID_MPEG4: - ff_mpeg4_encode_video_packet_header(s); - ff_mpeg4_clean_buffers(s); - break; - case CODEC_ID_MPEG1VIDEO: - case CODEC_ID_MPEG2VIDEO: - ff_mpeg1_encode_slice_header(s); - ff_mpeg1_clean_buffers(s); - break; - case CODEC_ID_H263: - case CODEC_ID_H263P: - h263_encode_gob_header(s, mb_y); - break; - } - - if(s->flags&CODEC_FLAG_PASS1){ - int bits= get_bit_count(&s->pb); - s->misc_bits+= bits - s->last_bits; - s->last_bits= bits; - } - - s->ptr_lastgob += current_packet_size; - s->first_slice_line=1; - s->resync_mb_x=mb_x; - s->resync_mb_y=mb_y; - } - } 
-#endif - - if( (s->resync_mb_x == s->mb_x) - && s->resync_mb_y+1 == s->mb_y){ - s->first_slice_line=0; - } - - s->mb_skiped=0; - s->dquant=0; //only for QP_RD - - if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){ // more than 1 MB type possible - int next_block=0; - int pb_bits_count, pb2_bits_count, tex_pb_bits_count; - - copy_context_before_encode(&backup_s, s, -1); - backup_s.pb= s->pb; - best_s.data_partitioning= s->data_partitioning; - best_s.partitioned_frame= s->partitioned_frame; - if(s->data_partitioning){ - backup_s.pb2= s->pb2; - backup_s.tex_pb= s->tex_pb; - } - - if(mb_type&CANDIDATE_MB_TYPE_INTER){ - s->mv_dir = MV_DIR_FORWARD; - s->mv_type = MV_TYPE_16X16; - s->mb_intra= 0; - s->mv[0][0][0] = s->p_mv_table[xy][0]; - s->mv[0][0][1] = s->p_mv_table[xy][1]; - encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb, - &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); - } - if(mb_type&CANDIDATE_MB_TYPE_INTER_I){ - s->mv_dir = MV_DIR_FORWARD; - s->mv_type = MV_TYPE_FIELD; - s->mb_intra= 0; - for(i=0; i<2; i++){ - j= s->field_select[0][i] = s->p_field_select_table[i][xy]; - s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0]; - s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1]; - } - encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb, - &dmin, &next_block, 0, 0); - } - if(mb_type&CANDIDATE_MB_TYPE_SKIPED){ - s->mv_dir = MV_DIR_FORWARD; - s->mv_type = MV_TYPE_16X16; - s->mb_intra= 0; - s->mv[0][0][0] = 0; - s->mv[0][0][1] = 0; - encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPED, pb, pb2, tex_pb, - &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); - } - if(mb_type&CANDIDATE_MB_TYPE_INTER4V){ - s->mv_dir = MV_DIR_FORWARD; - s->mv_type = MV_TYPE_8X8; - s->mb_intra= 0; - for(i=0; i<4; i++){ - s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0]; - s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1]; - } - encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb, - &dmin, &next_block, 0, 0); - } - if(mb_type&CANDIDATE_MB_TYPE_FORWARD){ - s->mv_dir = MV_DIR_FORWARD; - s->mv_type = MV_TYPE_16X16; - s->mb_intra= 0; - s->mv[0][0][0] = s->b_forw_mv_table[xy][0]; - s->mv[0][0][1] = s->b_forw_mv_table[xy][1]; - encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb, - &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); - } - if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){ - s->mv_dir = MV_DIR_BACKWARD; - s->mv_type = MV_TYPE_16X16; - s->mb_intra= 0; - s->mv[1][0][0] = s->b_back_mv_table[xy][0]; - s->mv[1][0][1] = s->b_back_mv_table[xy][1]; - encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb, - &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]); - } - if(mb_type&CANDIDATE_MB_TYPE_BIDIR){ - s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; - s->mv_type = MV_TYPE_16X16; - s->mb_intra= 0; - s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0]; - s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1]; - s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0]; - s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1]; - encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb, - &dmin, &next_block, 0, 0); - } - if(mb_type&CANDIDATE_MB_TYPE_DIRECT){ - int mx= s->b_direct_mv_table[xy][0]; - int my= s->b_direct_mv_table[xy][1]; - - s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; - s->mb_intra= 0; -#ifdef CONFIG_RISKY - ff_mpeg4_set_direct_mv(s, mx, my); -#endif - encode_mb_hq(s, &backup_s, &best_s, 
CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb, - &dmin, &next_block, mx, my); - } - if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){ - s->mv_dir = MV_DIR_FORWARD; - s->mv_type = MV_TYPE_FIELD; - s->mb_intra= 0; - for(i=0; i<2; i++){ - j= s->field_select[0][i] = s->b_field_select_table[0][i][xy]; - s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0]; - s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1]; - } - encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb, - &dmin, &next_block, 0, 0); - } - if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){ - s->mv_dir = MV_DIR_BACKWARD; - s->mv_type = MV_TYPE_FIELD; - s->mb_intra= 0; - for(i=0; i<2; i++){ - j= s->field_select[1][i] = s->b_field_select_table[1][i][xy]; - s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0]; - s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1]; - } - encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb, - &dmin, &next_block, 0, 0); - } - if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){ - s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; - s->mv_type = MV_TYPE_FIELD; - s->mb_intra= 0; - for(dir=0; dir<2; dir++){ - for(i=0; i<2; i++){ - j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy]; - s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0]; - s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1]; - } - } - encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb, - &dmin, &next_block, 0, 0); - } - if(mb_type&CANDIDATE_MB_TYPE_INTRA){ - s->mv_dir = 0; - s->mv_type = MV_TYPE_16X16; - s->mb_intra= 1; - s->mv[0][0][0] = 0; - s->mv[0][0][1] = 0; - encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb, - &dmin, &next_block, 0, 0); - if(s->h263_pred || s->h263_aic){ - if(best_s.mb_intra) - s->mbintra_table[mb_x + mb_y*s->mb_stride]=1; - else - ff_clean_intra_table_entries(s); //old mode? - } - } - - if(s->flags & CODEC_FLAG_QP_RD){ - if(best_s.mv_type==MV_TYPE_16X16 && !(best_s.mv_dir&MV_DIRECT)){ - const int last_qp= backup_s.qscale; - int dquant, dir, qp, dc[6]; - DCTELEM ac[6][16]; - const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0; - - assert(backup_s.dquant == 0); - - //FIXME intra - s->mv_dir= best_s.mv_dir; - s->mv_type = MV_TYPE_16X16; - s->mb_intra= best_s.mb_intra; - s->mv[0][0][0] = best_s.mv[0][0][0]; - s->mv[0][0][1] = best_s.mv[0][0][1]; - s->mv[1][0][0] = best_s.mv[1][0][0]; - s->mv[1][0][1] = best_s.mv[1][0][1]; - - dir= s->pict_type == B_TYPE ? 
2 : 1; - if(last_qp + dir > s->avctx->qmax) dir= -dir; - for(dquant= dir; dquant<=2 && dquant>=-2; dquant += dir){ - qp= last_qp + dquant; - if(qp < s->avctx->qmin || qp > s->avctx->qmax) - break; - backup_s.dquant= dquant; - if(s->mb_intra){ - for(i=0; i<6; i++){ - dc[i]= s->dc_val[0][ s->block_index[i] ]; - memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16); - } - } - - encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb, - &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]); - if(best_s.qscale != qp){ - if(s->mb_intra){ - for(i=0; i<6; i++){ - s->dc_val[0][ s->block_index[i] ]= dc[i]; - memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16); - } - } - if(dir > 0 && dquant==dir){ - dquant= 0; - dir= -dir; - }else - break; - } - } - qp= best_s.qscale; - s->current_picture.qscale_table[xy]= qp; - } - } - - copy_context_after_encode(s, &best_s, -1); - - pb_bits_count= get_bit_count(&s->pb); - flush_put_bits(&s->pb); - ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count); - s->pb= backup_s.pb; - - if(s->data_partitioning){ - pb2_bits_count= get_bit_count(&s->pb2); - flush_put_bits(&s->pb2); - ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count); - s->pb2= backup_s.pb2; - - tex_pb_bits_count= get_bit_count(&s->tex_pb); - flush_put_bits(&s->tex_pb); - ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count); - s->tex_pb= backup_s.tex_pb; - } - s->last_bits= get_bit_count(&s->pb); - -#ifdef CONFIG_RISKY - if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) - ff_h263_update_motion_val(s); -#endif - - if(next_block==0){ - s->dsp.put_pixels_tab[0][0](s->dest[0], s->me.scratchpad , s->linesize ,16); - s->dsp.put_pixels_tab[1][0](s->dest[1], s->me.scratchpad + 16, s->uvlinesize, 8); - s->dsp.put_pixels_tab[1][0](s->dest[2], s->me.scratchpad + 24, s->uvlinesize, 8); - } - - if(s->avctx->mb_decision == FF_MB_DECISION_BITS) - MPV_decode_mb(s, s->block); - } else { - int motion_x, motion_y; - s->mv_type=MV_TYPE_16X16; - // only one MB-Type possible - - switch(mb_type){ - case CANDIDATE_MB_TYPE_INTRA: - s->mv_dir = 0; - s->mb_intra= 1; - motion_x= s->mv[0][0][0] = 0; - motion_y= s->mv[0][0][1] = 0; - break; - case CANDIDATE_MB_TYPE_INTER: - s->mv_dir = MV_DIR_FORWARD; - s->mb_intra= 0; - motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0]; - motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1]; - break; - case CANDIDATE_MB_TYPE_INTER_I: - s->mv_dir = MV_DIR_FORWARD; - s->mv_type = MV_TYPE_FIELD; - s->mb_intra= 0; - for(i=0; i<2; i++){ - j= s->field_select[0][i] = s->p_field_select_table[i][xy]; - s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0]; - s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1]; - } - motion_x = motion_y = 0; - break; - case CANDIDATE_MB_TYPE_INTER4V: - s->mv_dir = MV_DIR_FORWARD; - s->mv_type = MV_TYPE_8X8; - s->mb_intra= 0; - for(i=0; i<4; i++){ - s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0]; - s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1]; - } - motion_x= motion_y= 0; - break; - case CANDIDATE_MB_TYPE_DIRECT: - s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; - s->mb_intra= 0; - motion_x=s->b_direct_mv_table[xy][0]; - motion_y=s->b_direct_mv_table[xy][1]; -#ifdef CONFIG_RISKY - ff_mpeg4_set_direct_mv(s, motion_x, motion_y); -#endif - break; - case CANDIDATE_MB_TYPE_BIDIR: - s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; - s->mb_intra= 0; - motion_x=0; - motion_y=0; - s->mv[0][0][0] = 
s->b_bidir_forw_mv_table[xy][0]; - s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1]; - s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0]; - s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1]; - break; - case CANDIDATE_MB_TYPE_BACKWARD: - s->mv_dir = MV_DIR_BACKWARD; - s->mb_intra= 0; - motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0]; - motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1]; - break; - case CANDIDATE_MB_TYPE_FORWARD: - s->mv_dir = MV_DIR_FORWARD; - s->mb_intra= 0; - motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0]; - motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1]; -// printf(" %d %d ", motion_x, motion_y); - break; - case CANDIDATE_MB_TYPE_FORWARD_I: - s->mv_dir = MV_DIR_FORWARD; - s->mv_type = MV_TYPE_FIELD; - s->mb_intra= 0; - for(i=0; i<2; i++){ - j= s->field_select[0][i] = s->b_field_select_table[0][i][xy]; - s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0]; - s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1]; - } - motion_x=motion_y=0; - break; - case CANDIDATE_MB_TYPE_BACKWARD_I: - s->mv_dir = MV_DIR_BACKWARD; - s->mv_type = MV_TYPE_FIELD; - s->mb_intra= 0; - for(i=0; i<2; i++){ - j= s->field_select[1][i] = s->b_field_select_table[1][i][xy]; - s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0]; - s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1]; - } - motion_x=motion_y=0; - break; - case CANDIDATE_MB_TYPE_BIDIR_I: - s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; - s->mv_type = MV_TYPE_FIELD; - s->mb_intra= 0; - for(dir=0; dir<2; dir++){ - for(i=0; i<2; i++){ - j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy]; - s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0]; - s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1]; - } - } - motion_x=motion_y=0; - break; - default: - motion_x=motion_y=0; //gcc warning fix - av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n"); - } - - encode_mb(s, motion_x, motion_y); - - // RAL: Update last macrobloc type - s->last_mv_dir = s->mv_dir; - -#ifdef CONFIG_RISKY - if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) - ff_h263_update_motion_val(s); -#endif - - MPV_decode_mb(s, s->block); - } - - /* clean the MV table in IPS frames for direct mode in B frames */ - if(s->mb_intra /* && I,P,S_TYPE */){ - s->p_mv_table[xy][0]=0; - s->p_mv_table[xy][1]=0; - } - - if(s->flags&CODEC_FLAG_PSNR){ - int w= 16; - int h= 16; - - if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16; - if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16; - - s->current_picture_ptr->error[0] += sse( - s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, - s->dest[0], w, h, s->linesize); - s->current_picture_ptr->error[1] += sse( - s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8, - s->dest[1], w>>1, h>>1, s->uvlinesize); - s->current_picture_ptr->error[2] += sse( - s, s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8, - s->dest[2], w>>1, h>>1, s->uvlinesize); - } - if(s->loop_filter) - ff_h263_loop_filter(s); -//printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb)); - } - } - emms_c(); - -#ifdef CONFIG_RISKY - if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame) - ff_mpeg4_merge_partitions(s); - - if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE) - msmpeg4_encode_ext_header(s); - - if(s->codec_id==CODEC_ID_MPEG4) - ff_mpeg4_stuffing(&s->pb); -#endif - - /* Send the last GOB if RTP */ - if (s->avctx->rtp_callback) { - flush_put_bits(&s->pb); - pdif = pbBufPtr(&s->pb) - s->ptr_lastgob; - /* Call the RTP callback to send the 
last GOB */ - s->avctx->rtp_callback(s->ptr_lastgob, pdif, 0); - } -} - -#endif //CONFIG_ENCODERS - -static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){ - const int intra= s->mb_intra; - int i; - - s->dct_count[intra]++; - - for(i=0; i<64; i++){ - int level= block[i]; - - if(level){ - if(level>0){ - s->dct_error_sum[intra][i] += level; - level -= s->dct_offset[intra][i]; - if(level<0) level=0; - }else{ - s->dct_error_sum[intra][i] -= level; - level += s->dct_offset[intra][i]; - if(level>0) level=0; - } - block[i]= level; - } - } -} - -#ifdef CONFIG_ENCODERS - -static int dct_quantize_trellis_c(MpegEncContext *s, - DCTELEM *block, int n, - int qscale, int *overflow){ - const int *qmat; - const uint8_t *scantable= s->intra_scantable.scantable; - const uint8_t *perm_scantable= s->intra_scantable.permutated; - int max=0; - unsigned int threshold1, threshold2; - int bias=0; - int run_tab[65]; - int level_tab[65]; - int score_tab[65]; - int survivor[65]; - int survivor_count; - int last_run=0; - int last_level=0; - int last_score= 0; - int last_i; - int coeff[2][64]; - int coeff_count[64]; - int qmul, qadd, start_i, last_non_zero, i, dc; - const int esc_length= s->ac_esc_length; - uint8_t * length; - uint8_t * last_length; - const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6); - - s->dsp.fdct (block); - - if(s->dct_error_sum) - s->denoise_dct(s, block); - qmul= qscale*16; - qadd= ((qscale-1)|1)*8; - - if (s->mb_intra) { - int q; - if (!s->h263_aic) { - if (n < 4) - q = s->y_dc_scale; - else - q = s->c_dc_scale; - q = q << 3; - } else{ - /* For AIC we skip quant/dequant of INTRADC */ - q = 1 << 3; - qadd=0; - } - - /* note: block[0] is assumed to be positive */ - block[0] = (block[0] + (q >> 1)) / q; - start_i = 1; - last_non_zero = 0; - qmat = s->q_intra_matrix[qscale]; - if(s->mpeg_quant || s->out_format == FMT_MPEG1) - bias= 1<<(QMAT_SHIFT-1); - length = s->intra_ac_vlc_length; - last_length= s->intra_ac_vlc_last_length; - } else { - start_i = 0; - last_non_zero = -1; - qmat = s->q_inter_matrix[qscale]; - length = s->inter_ac_vlc_length; - last_length= s->inter_ac_vlc_last_length; - } - last_i= start_i; - - threshold1= (1<=start_i; i--) { - const int j = scantable[i]; - int level = block[j] * qmat[j]; - - if(((unsigned)(level+threshold1))>threshold2){ - last_non_zero = i; - break; - } - } - - for(i=start_i; i<=last_non_zero; i++) { - const int j = scantable[i]; - int level = block[j] * qmat[j]; - -// if( bias+level >= (1<<(QMAT_SHIFT - 3)) -// || bias-level >= (1<<(QMAT_SHIFT - 3))){ - if(((unsigned)(level+threshold1))>threshold2){ - if(level>0){ - level= (bias + level)>>QMAT_SHIFT; - coeff[0][i]= level; - coeff[1][i]= level-1; -// coeff[2][k]= level-2; - }else{ - level= (bias - level)>>QMAT_SHIFT; - coeff[0][i]= -level; - coeff[1][i]= -level+1; -// coeff[2][k]= -level+2; - } - coeff_count[i]= FFMIN(level, 2); - assert(coeff_count[i]); - max |=level; - }else{ - coeff[0][i]= (level>>31)|1; - coeff_count[i]= 1; - } - } - - *overflow= s->max_qcoeff < max; //overflow might have happend - - if(last_non_zero < start_i){ - memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM)); - return last_non_zero; - } - - score_tab[start_i]= 0; - survivor[0]= start_i; - survivor_count= 1; - - for(i=start_i; i<=last_non_zero; i++){ - int level_index, j; - const int dct_coeff= ABS(block[ scantable[i] ]); - const int zero_distoration= dct_coeff*dct_coeff; - int best_score=256*256*256*120; - for(level_index=0; level_index < coeff_count[i]; level_index++){ - int distoration; - int level= 
coeff[level_index][i]; - const int alevel= ABS(level); - int unquant_coeff; - - assert(level); - - if(s->out_format == FMT_H263){ - unquant_coeff= alevel*qmul + qadd; - }else{ //MPEG1 - j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize - if(s->mb_intra){ - unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3; - unquant_coeff = (unquant_coeff - 1) | 1; - }else{ - unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4; - unquant_coeff = (unquant_coeff - 1) | 1; - } - unquant_coeff<<= 3; - } - - distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distoration; - level+=64; - if((level&(~127)) == 0){ - for(j=survivor_count-1; j>=0; j--){ - int run= i - survivor[j]; - int score= distoration + length[UNI_AC_ENC_INDEX(run, level)]*lambda; - score += score_tab[i-run]; - - if(score < best_score){ - best_score= score; - run_tab[i+1]= run; - level_tab[i+1]= level-64; - } - } - - if(s->out_format == FMT_H263){ - for(j=survivor_count-1; j>=0; j--){ - int run= i - survivor[j]; - int score= distoration + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda; - score += score_tab[i-run]; - if(score < last_score){ - last_score= score; - last_run= run; - last_level= level-64; - last_i= i+1; - } - } - } - }else{ - distoration += esc_length*lambda; - for(j=survivor_count-1; j>=0; j--){ - int run= i - survivor[j]; - int score= distoration + score_tab[i-run]; - - if(score < best_score){ - best_score= score; - run_tab[i+1]= run; - level_tab[i+1]= level-64; - } - } - - if(s->out_format == FMT_H263){ - for(j=survivor_count-1; j>=0; j--){ - int run= i - survivor[j]; - int score= distoration + score_tab[i-run]; - if(score < last_score){ - last_score= score; - last_run= run; - last_level= level-64; - last_i= i+1; - } - } - } - } - } - - score_tab[i+1]= best_score; - - //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level - if(last_non_zero <= 27){ - for(; survivor_count; survivor_count--){ - if(score_tab[ survivor[survivor_count-1] ] <= best_score) - break; - } - }else{ - for(; survivor_count; survivor_count--){ - if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda) - break; - } - } - - survivor[ survivor_count++ ]= i+1; - } - - if(s->out_format != FMT_H263){ - last_score= 256*256*256*120; - for(i= survivor[0]; i<=last_non_zero + 1; i++){ - int score= score_tab[i]; - if(i) score += lambda*2; //FIXME exacter? 
- - if(score < last_score){ - last_score= score; - last_i= i; - last_level= level_tab[i]; - last_run= run_tab[i]; - } - } - } - - s->coded_score[n] = last_score; - - dc= ABS(block[0]); - last_non_zero= last_i - 1; - memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM)); - - if(last_non_zero < start_i) - return last_non_zero; - - if(last_non_zero == 0 && start_i == 0){ - int best_level= 0; - int best_score= dc * dc; - - for(i=0; iout_format == FMT_H263){ - unquant_coeff= (alevel*qmul + qadd)>>3; - }else{ //MPEG1 - unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4; - unquant_coeff = (unquant_coeff - 1) | 1; - } - unquant_coeff = (unquant_coeff + 4) >> 3; - unquant_coeff<<= 3 + 3; - - distortion= (unquant_coeff - dc) * (unquant_coeff - dc); - level+=64; - if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda; - else score= distortion + esc_length*lambda; - - if(score < best_score){ - best_score= score; - best_level= level - 64; - } - } - block[0]= best_level; - s->coded_score[n] = best_score - dc*dc; - if(best_level == 0) return -1; - else return last_non_zero; - } - - i= last_i; - assert(last_level); - - block[ perm_scantable[last_non_zero] ]= last_level; - i -= last_run + 1; - - for(; i>start_i; i -= run_tab[i] + 1){ - block[ perm_scantable[i-1] ]= level_tab[i]; - } - - return last_non_zero; -} - -static int dct_quantize_c(MpegEncContext *s, - DCTELEM *block, int n, - int qscale, int *overflow) -{ - int i, j, level, last_non_zero, q, start_i; - const int *qmat; - const uint8_t *scantable= s->intra_scantable.scantable; - int bias; - int max=0; - unsigned int threshold1, threshold2; - - s->dsp.fdct (block); - - if(s->dct_error_sum) - s->denoise_dct(s, block); - - if (s->mb_intra) { - if (!s->h263_aic) { - if (n < 4) - q = s->y_dc_scale; - else - q = s->c_dc_scale; - q = q << 3; - } else - /* For AIC we skip quant/dequant of INTRADC */ - q = 1 << 3; - - /* note: block[0] is assumed to be positive */ - block[0] = (block[0] + (q >> 1)) / q; - start_i = 1; - last_non_zero = 0; - qmat = s->q_intra_matrix[qscale]; - bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT); - } else { - start_i = 0; - last_non_zero = -1; - qmat = s->q_inter_matrix[qscale]; - bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT); - } - threshold1= (1<=start_i;i--) { - j = scantable[i]; - level = block[j] * qmat[j]; - - if(((unsigned)(level+threshold1))>threshold2){ - last_non_zero = i; - break; - }else{ - block[j]=0; - } - } - for(i=start_i; i<=last_non_zero; i++) { - j = scantable[i]; - level = block[j] * qmat[j]; - -// if( bias+level >= (1<= (1<threshold2){ - if(level>0){ - level= (bias + level)>>QMAT_SHIFT; - block[j]= level; - }else{ - level= (bias - level)>>QMAT_SHIFT; - block[j]= -level; - } - max |=level; - }else{ - block[j]=0; - } - } - *overflow= s->max_qcoeff < max; //overflow might have happend - - /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */ - if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM) - ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero); - - return last_non_zero; -} - -#endif //CONFIG_ENCODERS - -static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, +static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, nCoeffs; const uint16_t *quant_matrix; nCoeffs= s->block_last_index[n]; - - if (n < 4) + + if (n < 4) block[0] = block[0] * s->y_dc_scale; else block[0] = block[0] * s->c_dc_scale; @@ 
-5025,14 +2103,14 @@ static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, } } -static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, +static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, nCoeffs; const uint16_t *quant_matrix; nCoeffs= s->block_last_index[n]; - + quant_matrix = s->inter_matrix; for(i=0; i<=nCoeffs; i++) { int j= s->intra_scantable.permutated[i]; @@ -5054,7 +2132,7 @@ static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, } } -static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, +static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, nCoeffs; @@ -5062,8 +2140,8 @@ static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, if(s->alternate_scan) nCoeffs= 63; else nCoeffs= s->block_last_index[n]; - - if (n < 4) + + if (n < 4) block[0] = block[0] * s->y_dc_scale; else block[0] = block[0] * s->c_dc_scale; @@ -5084,7 +2162,7 @@ static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, } } -static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, +static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, nCoeffs; @@ -5093,7 +2171,40 @@ static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, if(s->alternate_scan) nCoeffs= 63; else nCoeffs= s->block_last_index[n]; - + + if (n < 4) + block[0] = block[0] * s->y_dc_scale; + else + block[0] = block[0] * s->c_dc_scale; + quant_matrix = s->intra_matrix; + for(i=1;i<=nCoeffs;i++) { + int j= s->intra_scantable.permutated[i]; + level = block[j]; + if (level) { + if (level < 0) { + level = -level; + level = (int)(level * qscale * quant_matrix[j]) >> 3; + level = -level; + } else { + level = (int)(level * qscale * quant_matrix[j]) >> 3; + } + block[j] = level; + sum+=level; + } + } + block[63]^=sum&1; +} + +static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, + DCTELEM *block, int n, int qscale) +{ + int i, level, nCoeffs; + const uint16_t *quant_matrix; + int sum=-1; + + if(s->alternate_scan) nCoeffs= 63; + else nCoeffs= s->block_last_index[n]; + quant_matrix = s->inter_matrix; for(i=0; i<=nCoeffs; i++) { int j= s->intra_scantable.permutated[i]; @@ -5115,18 +2226,18 @@ static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, block[63]^=sum&1; } -static void dct_unquantize_h263_intra_c(MpegEncContext *s, +static void dct_unquantize_h263_intra_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, qmul, qadd; int nCoeffs; - + assert(s->block_last_index[n]>=0); - + qmul = qscale << 1; - + if (!s->h263_aic) { - if (n < 4) + if (n < 4) block[0] = block[0] * s->y_dc_scale; else block[0] = block[0] * s->c_dc_scale; @@ -5152,17 +2263,17 @@ static void dct_unquantize_h263_intra_c(MpegEncContext *s, } } -static void dct_unquantize_h263_inter_c(MpegEncContext *s, +static void dct_unquantize_h263_inter_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, qmul, qadd; int nCoeffs; - + assert(s->block_last_index[n]>=0); - + qadd = (qscale - 1) | 1; qmul = qscale << 1; - + nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; for(i=0; i<=nCoeffs; i++) { @@ -5178,187 +2289,19 @@ static void dct_unquantize_h263_inter_c(MpegEncContext *s, } } -static const AVOption mpeg4_options[] = +/** + * set qscale and update qscale dependent variables. 
+ */ +void ff_set_qscale(MpegEncContext * s, int qscale) { - AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000), - AVOPTION_CODEC_INT("ratetol", "number of bits the bitstream is allowed to diverge from the reference" - "the reference can be CBR (for CBR pass1) or VBR (for pass2)", - bit_rate_tolerance, 4, 240000000, 8000), - AVOPTION_CODEC_INT("qmin", "minimum quantizer", qmin, 1, 31, 2), - AVOPTION_CODEC_INT("qmax", "maximum quantizer", qmax, 1, 31, 31), - AVOPTION_CODEC_STRING("rc_eq", "rate control equation", - rc_eq, "tex^qComp,option1,options2", 0), - AVOPTION_CODEC_INT("rc_minrate", "rate control minimum bitrate", - rc_min_rate, 4, 24000000, 0), - AVOPTION_CODEC_INT("rc_maxrate", "rate control maximum bitrate", - rc_max_rate, 4, 24000000, 0), - AVOPTION_CODEC_DOUBLE("rc_buf_aggresivity", "rate control buffer aggresivity", - rc_buffer_aggressivity, 4, 24000000, 0), - AVOPTION_CODEC_DOUBLE("rc_initial_cplx", "initial complexity for pass1 ratecontrol", - rc_initial_cplx, 0., 9999999., 0), - AVOPTION_CODEC_DOUBLE("i_quant_factor", "qscale factor between p and i frames", - i_quant_factor, 0., 0., 0), - AVOPTION_CODEC_DOUBLE("i_quant_offset", "qscale offset between p and i frames", - i_quant_factor, -999999., 999999., 0), - AVOPTION_CODEC_INT("dct_algo", "dct alghorithm", - dct_algo, 0, 5, 0), // fixme - "Auto,FastInt,Int,MMX,MLib,Altivec" - AVOPTION_CODEC_DOUBLE("lumi_masking", "luminance masking", - lumi_masking, 0., 999999., 0), - AVOPTION_CODEC_DOUBLE("temporal_cplx_masking", "temporary complexity masking", - temporal_cplx_masking, 0., 999999., 0), - AVOPTION_CODEC_DOUBLE("spatial_cplx_masking", "spatial complexity masking", - spatial_cplx_masking, 0., 999999., 0), - AVOPTION_CODEC_DOUBLE("p_masking", "p block masking", - p_masking, 0., 999999., 0), - AVOPTION_CODEC_DOUBLE("dark_masking", "darkness masking", - dark_masking, 0., 999999., 0), - AVOPTION_CODEC_INT("idct_algo", "idct alghorithm", - idct_algo, 0, 8, 0), // fixme - "Auto,Int,Simple,SimpleMMX,LibMPEG2MMX,PS2,MLib,ARM,Altivec" + if (qscale < 1) + qscale = 1; + else if (qscale > 31) + qscale = 31; - AVOPTION_CODEC_INT("mb_qmin", "minimum MB quantizer", - mb_qmin, 0, 8, 0), - AVOPTION_CODEC_INT("mb_qmax", "maximum MB quantizer", - mb_qmin, 0, 8, 0), + s->qscale = qscale; + s->chroma_qscale= s->chroma_qscale_table[qscale]; - AVOPTION_CODEC_INT("me_cmp", "ME compare function", - me_cmp, 0, 24000000, 0), - AVOPTION_CODEC_INT("me_sub_cmp", "subpixel ME compare function", - me_sub_cmp, 0, 24000000, 0), - - - AVOPTION_CODEC_INT("dia_size", "ME diamond size & shape", - dia_size, 0, 24000000, 0), - AVOPTION_CODEC_INT("last_predictor_count", "amount of previous MV predictors", - last_predictor_count, 0, 24000000, 0), - - AVOPTION_CODEC_INT("pre_me", "pre pass for ME", - pre_me, 0, 24000000, 0), - AVOPTION_CODEC_INT("me_pre_cmp", "ME pre pass compare function", - me_pre_cmp, 0, 24000000, 0), - - AVOPTION_CODEC_INT("me_range", "maximum ME search range", - me_range, 0, 24000000, 0), - AVOPTION_CODEC_INT("pre_dia_size", "ME pre pass diamod size & shape", - pre_dia_size, 0, 24000000, 0), - AVOPTION_CODEC_INT("me_subpel_quality", "subpel ME quality", - me_subpel_quality, 0, 24000000, 0), - AVOPTION_CODEC_INT("me_range", "maximum ME search range", - me_range, 0, 24000000, 0), - AVOPTION_CODEC_FLAG("psnr", "calculate PSNR of compressed frames", - flags, CODEC_FLAG_PSNR, 0), - AVOPTION_CODEC_RCOVERRIDE("rc_override", "ratecontrol override (=startframe,endframe,qscale,quality_factor)", - rc_override), - 
AVOPTION_SUB(avoptions_common), - AVOPTION_END() -}; - -#ifdef CONFIG_ENCODERS -#ifdef CONFIG_RISKY -AVCodec h263_encoder = { - "h263", - CODEC_TYPE_VIDEO, - CODEC_ID_H263, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, -}; - -AVCodec h263p_encoder = { - "h263p", - CODEC_TYPE_VIDEO, - CODEC_ID_H263P, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, -}; - -AVCodec flv_encoder = { - "flv", - CODEC_TYPE_VIDEO, - CODEC_ID_FLV1, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, -}; - -AVCodec rv10_encoder = { - "rv10", - CODEC_TYPE_VIDEO, - CODEC_ID_RV10, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, -}; - -AVCodec mpeg4_encoder = { - "mpeg4", - CODEC_TYPE_VIDEO, - CODEC_ID_MPEG4, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, - .options = mpeg4_options, -}; - -AVCodec msmpeg4v1_encoder = { - "msmpeg4v1", - CODEC_TYPE_VIDEO, - CODEC_ID_MSMPEG4V1, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, - .options = mpeg4_options, -}; - -AVCodec msmpeg4v2_encoder = { - "msmpeg4v2", - CODEC_TYPE_VIDEO, - CODEC_ID_MSMPEG4V2, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, - .options = mpeg4_options, -}; - -AVCodec msmpeg4v3_encoder = { - "msmpeg4", - CODEC_TYPE_VIDEO, - CODEC_ID_MSMPEG4V3, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, - .options = mpeg4_options, -}; - -AVCodec wmv1_encoder = { - "wmv1", - CODEC_TYPE_VIDEO, - CODEC_ID_WMV1, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, - .options = mpeg4_options, -}; - -#endif - -AVCodec mjpeg_encoder = { - "mjpeg", - CODEC_TYPE_VIDEO, - CODEC_ID_MJPEG, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, -}; - -#endif //CONFIG_ENCODERS + s->y_dc_scale= s->y_dc_scale_table[ qscale ]; + s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ]; +} diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo.h b/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo.h index 88e2f6d50b..255e1f2080 100644 --- a/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo.h +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo.h @@ -3,42 +3,48 @@ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard. * Copyright (c) 2002-2004 Michael Niedermayer * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file mpegvideo.h * mpegvideo header. */ - -#ifndef AVCODEC_MPEGVIDEO_H -#define AVCODEC_MPEGVIDEO_H + +#ifndef FFMPEG_MPEGVIDEO_H +#define FFMPEG_MPEGVIDEO_H #include "dsputil.h" +#include "bitstream.h" +#include "ratecontrol.h" +#include "parser.h" +#include "mpeg12data.h" +#include "rl.h" -#define FRAME_SKIPED 100 ///< return value for header parsers if frame is not coded +#define FRAME_SKIPPED 100 ///< return value for header parsers if frame is not coded enum OutputFormat { FMT_MPEG1, + FMT_H261, FMT_H263, - FMT_MJPEG, + FMT_MJPEG, FMT_H264, }; -#define EDGE_WIDTH 16 - #define MPEG_BUF_SIZE (16 * 1024) #define QMAT_SHIFT_MMX 16 @@ -47,83 +53,27 @@ enum OutputFormat { #define MAX_FCODE 7 #define MAX_MV 2048 -#define MAX_PICTURE_COUNT 15 +#define MAX_THREADS 8 + +#define MAX_PICTURE_COUNT 32 #define ME_MAP_SIZE 64 #define ME_MAP_SHIFT 3 #define ME_MAP_MV_BITS 11 -/* run length table */ -#define MAX_RUN 64 -#define MAX_LEVEL 64 +#define MAX_MB_BYTES (30*16*16*3/8 + 120) -#define I_TYPE FF_I_TYPE ///< Intra -#define P_TYPE FF_P_TYPE ///< Predicted -#define B_TYPE FF_B_TYPE ///< Bi-dir predicted -#define S_TYPE FF_S_TYPE ///< S(GMC)-VOP MPEG4 -#define SI_TYPE FF_SI_TYPE ///< Switching Intra -#define SP_TYPE FF_SP_TYPE ///< Switching Predicted +#define INPLACE_OFFSET 16 -typedef struct Predictor{ - double coeff; - double count; - double decay; -} Predictor; - -typedef struct RateControlEntry{ - int pict_type; - float qscale; - int mv_bits; - int i_tex_bits; - int p_tex_bits; - int misc_bits; - uint64_t expected_bits; - int new_pict_type; - float new_qscale; - int mc_mb_var_sum; - int mb_var_sum; - int i_count; - int f_code; - int b_code; -}RateControlEntry; - -/** - * rate control context. - */ -typedef struct RateControlContext{ - FILE *stats_file; - int num_entries; ///< number of RateControlEntries - RateControlEntry *entry; - double buffer_index; ///< amount of bits in the video/audio buffer - Predictor pred[5]; - double short_term_qsum; ///< sum of recent qscales - double short_term_qcount; ///< count of recent qscales - double pass1_rc_eq_output_sum;///< sum of the output of the rc equation, this is used for normalization - double pass1_wanted_bits; ///< bits which should have been outputed by the pass1 code (including complexity init) - double last_qscale; - double last_qscale_for[5]; ///< last qscale for a specific pict type, used for max_diff & ipb factor stuff - int last_mc_mb_var_sum; - int last_mb_var_sum; - uint64_t i_cplx_sum[5]; - uint64_t p_cplx_sum[5]; - uint64_t mv_bits_sum[5]; - uint64_t qscale_sum[5]; - int frame_count[5]; - int last_non_b_pict_type; -}RateControlContext; - -/** - * Scantable. - */ -typedef struct ScanTable{ - const uint8_t *scantable; - uint8_t permutated[64]; - uint8_t raster_end[64]; -#ifdef ARCH_POWERPC - /** Used by dct_quantise_alitvec to find last-non-zero */ - uint8_t __align8 inverse[64]; -#endif -} ScanTable; +/* Start codes. 
*/ +#define SEQ_END_CODE 0x000001b7 +#define SEQ_START_CODE 0x000001b3 +#define GOP_START_CODE 0x000001b8 +#define PICTURE_START_CODE 0x00000100 +#define SLICE_MIN_START_CODE 0x00000101 +#define SLICE_MAX_START_CODE 0x000001af +#define EXT_START_CODE 0x000001b5 +#define USER_START_CODE 0x000001b2 /** * Picture. @@ -136,9 +86,8 @@ typedef struct Picture{ */ uint8_t *interpolated[3]; int16_t (*motion_val_base[2])[2]; - int8_t *ref_index[2]; uint32_t *mb_type_base; -#define MB_TYPE_INTRA MB_TYPE_INTRA4x4 //default mb_type if theres just one type +#define MB_TYPE_INTRA MB_TYPE_INTRA4x4 //default mb_type if there is just one type #define IS_INTRA4x4(a) ((a)&MB_TYPE_INTRA4x4) #define IS_INTRA16x16(a) ((a)&MB_TYPE_INTRA16x16) #define IS_PCM(a) ((a)&MB_TYPE_INTRA_PCM) @@ -160,77 +109,83 @@ typedef struct Picture{ #define IS_ACPRED(a) ((a)&MB_TYPE_ACPRED) #define IS_QUANT(a) ((a)&MB_TYPE_QUANT) #define IS_DIR(a, part, list) ((a) & (MB_TYPE_P0L0<<((part)+2*(list)))) -#define USES_LIST(a, list) ((a) & ((MB_TYPE_P0L0|MB_TYPE_P1L0)<<(2*(list)))) ///< does this mb use listX, note doesnt work if subMBs +#define USES_LIST(a, list) ((a) & ((MB_TYPE_P0L0|MB_TYPE_P1L0)<<(2*(list)))) ///< does this mb use listX, note does not work if subMBs #define HAS_CBP(a) ((a)&MB_TYPE_CBP) int field_poc[2]; ///< h264 top/bottom POC int poc; ///< h264 frame POC - int frame_num; ///< h264 frame_num - int pic_id; ///< h264 pic_num or long_term_pic_idx + int frame_num; ///< h264 frame_num (raw frame_num from slice header) + int pic_id; /**< h264 pic_num (short -> no wrap version of pic_num, + pic_num & max_pic_num; long -> long_pic_num) */ int long_ref; ///< 1->long term reference 0->short term reference + int ref_poc[2][2][16]; ///< h264 POCs of the frames used as reference (FIXME need per slice) + int ref_count[2][2]; ///< number of entries in ref_poc (FIXME need per slice) - int mb_var_sum; ///< sum of MB variance for current frame - int mc_mb_var_sum; ///< motion compensated MB variance for current frame - uint16_t *mb_var; ///< Table for MB variances - uint16_t *mc_mb_var; ///< Table for motion compensated MB variances - uint8_t *mb_mean; ///< Table for MB luminance - int32_t *mb_cmp_score; ///< Table for MB cmp scores, for mb decission FIXME remove + int mb_var_sum; ///< sum of MB variance for current frame + int mc_mb_var_sum; ///< motion compensated MB variance for current frame + uint16_t *mb_var; ///< Table for MB variances + uint16_t *mc_mb_var; ///< Table for motion compensated MB variances + uint8_t *mb_mean; ///< Table for MB luminance + int32_t *mb_cmp_score; ///< Table for MB cmp scores, for mb decision FIXME remove int b_frame_score; /* */ } Picture; -typedef struct ParseContext{ - uint8_t *buffer; - int index; - int last_index; - int buffer_size; - uint32_t state; ///< contains the last few bytes in MSB order - int frame_start_found; - int overread; ///< the number of bytes which where irreversibly read from the next frame - int overread_index; ///< the index into ParseContext.buffer of the overreaded bytes -} ParseContext; - struct MpegEncContext; /** * Motion estimation context. 
*/ typedef struct MotionEstContext{ - int skip; ///< set if ME is skiped for the current MB - int co_located_mv[4][2]; ///< mv from last p frame for direct mode ME + AVCodecContext *avctx; + int skip; ///< set if ME is skipped for the current MB + int co_located_mv[4][2]; ///< mv from last P-frame for direct mode ME int direct_basis_mv[4][2]; - uint8_t *scratchpad; ///< data area for the me algo, so that the ME doesnt need to malloc/free - uint32_t *map; ///< map to avoid duplicate evaluations - uint32_t *score_map; ///< map to store the scores - int map_generation; + uint8_t *scratchpad; ///< data area for the ME algo, so that the ME does not need to malloc/free + uint8_t *best_mb; + uint8_t *temp_mb[2]; + uint8_t *temp; + int best_bits; + uint32_t *map; ///< map to avoid duplicate evaluations + uint32_t *score_map; ///< map to store the scores + int map_generation; int pre_penalty_factor; - int penalty_factor; + int penalty_factor; /*!< an estimate of the bits required to + code a given mv value, e.g. (1,0) takes + more bits than (0,0). We have to + estimate whether any reduction in + residual is worth the extra bits. */ int sub_penalty_factor; int mb_penalty_factor; - int pre_pass; ///< = 1 for the pre pass + int flags; + int sub_flags; + int mb_flags; + int pre_pass; ///< = 1 for the pre pass int dia_size; int xmin; int xmax; int ymin; int ymax; - uint8_t (*mv_penalty)[MAX_MV*2+1]; ///< amount of bits needed to encode a MV + int pred_x; + int pred_y; + uint8_t *src[4][4]; + uint8_t *ref[4][4]; + int stride; + int uvstride; + /* temp variables for picture complexity calculation */ + int mc_mb_var_sum_temp; + int mb_var_sum_temp; + int scene_change_score; +/* cmp, chroma_cmp;*/ + op_pixels_func (*hpel_put)[4]; + op_pixels_func (*hpel_avg)[4]; + qpel_mc_func (*qpel_put)[16]; + qpel_mc_func (*qpel_avg)[16]; + uint8_t (*mv_penalty)[MAX_MV*2+1]; ///< amount of bits needed to encode a MV + uint8_t *current_mv_penalty; int (*sub_motion_search)(struct MpegEncContext * s, - int *mx_ptr, int *my_ptr, int dmin, - int pred_x, int pred_y, uint8_t *src_data[3], - uint8_t *ref_data[6], int stride, int uvstride, - int size, int h, uint8_t * const mv_penalty); - int (*motion_search[7])(struct MpegEncContext * s, - int *mx_ptr, int *my_ptr, - int P[10][2], int pred_x, int pred_y, uint8_t *src_data[3], - uint8_t *ref_data[6], int stride, int uvstride, int16_t (*last_mv)[2], - int ref_mv_scale, uint8_t * const mv_penalty); - int (*pre_motion_search)(struct MpegEncContext * s, - int *mx_ptr, int *my_ptr, - int P[10][2], int pred_x, int pred_y, uint8_t *src_data[3], - uint8_t *ref_data[6], int stride, int uvstride, int16_t (*last_mv)[2], - int ref_mv_scale, uint8_t * const mv_penalty); - int (*get_mb_score)(struct MpegEncContext * s, int mx, int my, int pred_x, int pred_y, uint8_t *src_data[3], - uint8_t *ref_data[6], int stride, int uvstride, - uint8_t * const mv_penalty); + int *mx_ptr, int *my_ptr, int dmin, + int src_index, int ref_index, + int size, int h); }MotionEstContext; /** @@ -239,28 +194,30 @@ typedef struct MotionEstContext{ typedef struct MpegEncContext { struct AVCodecContext *avctx; /* the following parameters must be initialized before encoding */ - int width, height;///< picture size. must be a multiple of 16 + int width, height;///< picture size. 
must be a multiple of 16 int gop_size; - int intra_only; ///< if true, only intra pictures are generated - int bit_rate; ///< wanted bit rate - enum OutputFormat out_format; ///< output format - int h263_pred; ///< use mpeg4/h263 ac/dc predictions + int intra_only; ///< if true, only intra pictures are generated + int bit_rate; ///< wanted bit rate + enum OutputFormat out_format; ///< output format + int h263_pred; ///< use mpeg4/h263 ac/dc predictions /* the following codec id fields are deprecated in favor of codec_id */ - int h263_plus; ///< h263 plus headers + int h263_plus; ///< h263 plus headers int h263_msmpeg4; ///< generate MSMPEG4 compatible stream (deprecated, use msmpeg4_version instead) - int h263_flv; ///< use flv h263 header - - int codec_id; /* see CODEC_ID_xxx */ - int fixed_qscale; ///< fixed qscale if non zero - int encoding; ///< true if we are encoding (vs decoding) - int flags; ///< AVCodecContext.flags (HQ, MV4, ...) + int h263_flv; ///< use flv h263 header + + enum CodecID codec_id; /* see CODEC_ID_xxx */ + int fixed_qscale; ///< fixed qscale if non zero + int encoding; ///< true if we are encoding (vs decoding) + int flags; ///< AVCodecContext.flags (HQ, MV4, ...) int flags2; ///< AVCodecContext.flags2 - int max_b_frames; ///< max number of b-frames for encoding + int max_b_frames; ///< max number of b-frames for encoding int luma_elim_threshold; int chroma_elim_threshold; - int strict_std_compliance; ///< strictly follow the std (MPEG4, ...) - int workaround_bugs; ///< workaround bugs in encoders which cannot be detected automatically + int strict_std_compliance; ///< strictly follow the std (MPEG4, ...) + int workaround_bugs; ///< workaround bugs in encoders which cannot be detected automatically + int codec_tag; ///< internal codec_tag upper case converted from avctx codec_tag + int stream_codec_tag; ///< internal stream_codec_tag upper case converted from avctx stream_codec_tag /* the following fields are managed internally by the encoder */ /** bit output */ @@ -268,139 +225,154 @@ typedef struct MpegEncContext { /* sequence parameters */ int context_initialized; - int input_picture_number; ///< used to set pic->display_picture_number, shouldnt be used for/by anything else - int coded_picture_number; ///< used to set pic->coded_picture_number, shouldnt be used for/by anything else + int input_picture_number; ///< used to set pic->display_picture_number, should not be used for/by anything else + int coded_picture_number; ///< used to set pic->coded_picture_number, should not be used for/by anything else int picture_number; //FIXME remove, unclear definition - int picture_in_gop_number; ///< 0-> first pic in gop, ... 
- int b_frames_since_non_b; ///< used for encoding, relative to not yet reordered input - int mb_width, mb_height; ///< number of MBs horizontally & vertically - int mb_stride; ///< mb_width+1 used for some arrays to allow simple addressng of left & top MBs withoutt sig11 - int b8_stride; ///< 2*mb_width+1 used for some 8x8 block arrays to allow simple addressng - int b4_stride; ///< 4*mb_width+1 used for some 4x4 block arrays to allow simple addressng - int h_edge_pos, v_edge_pos;///< horizontal / vertical position of the right/bottom edge (pixel replicateion) - int mb_num; ///< number of MBs of a picture - int linesize; ///< line size, in bytes, may be different from width - int uvlinesize; ///< line size, for chroma in bytes, may be different from width - Picture *picture; ///< main picture buffer + int picture_in_gop_number; ///< 0-> first pic in gop, ... + int b_frames_since_non_b; ///< used for encoding, relative to not yet reordered input + int64_t user_specified_pts;///< last non zero pts from AVFrame which was passed into avcodec_encode_video() + int mb_width, mb_height; ///< number of MBs horizontally & vertically + int mb_stride; ///< mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 + int b8_stride; ///< 2*mb_width+1 used for some 8x8 block arrays to allow simple addressing + int b4_stride; ///< 4*mb_width+1 used for some 4x4 block arrays to allow simple addressing + int h_edge_pos, v_edge_pos;///< horizontal / vertical position of the right/bottom edge (pixel replication) + int mb_num; ///< number of MBs of a picture + int linesize; ///< line size, in bytes, may be different from width + int uvlinesize; ///< line size, for chroma in bytes, may be different from width + Picture *picture; ///< main picture buffer Picture **input_picture; ///< next pictures on display order for encoding Picture **reordered_input_picture; ///< pointer to the next pictures in codedorder for encoding - - /** + + int start_mb_y; ///< start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) + int end_mb_y; ///< end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) + struct MpegEncContext *thread_context[MAX_THREADS]; + + /** * copy of the previous picture structure. * note, linesize & data, might not match the previous picture (for field pictures) */ - Picture last_picture; - - /** + Picture last_picture; + + /** * copy of the next picture structure. * note, linesize & data, might not match the next picture (for field pictures) */ Picture next_picture; - - /** + + /** * copy of the source picture structure for encoding. * note, linesize & data, might not match the source picture (for field pictures) */ Picture new_picture; - - /** + + /** * copy of the current picture structure. * note, linesize & data, might not match the current picture (for field pictures) */ - Picture current_picture; ///< buffer to store the decompressed current picture - + Picture current_picture; ///< buffer to store the decompressed current picture + Picture *last_picture_ptr; ///< pointer to the previous picture. 
- Picture *next_picture_ptr; ///< pointer to the next picture (for bidir pred) + Picture *next_picture_ptr; ///< pointer to the next picture (for bidir pred) Picture *current_picture_ptr; ///< pointer to the current picture uint8_t *visualization_buffer[3]; //< temporary buffer vor MV visualization - int last_dc[3]; ///< last DC values for MPEG1 - int16_t *dc_val[3]; ///< used for mpeg4 DC prediction, all 3 arrays must be continuous + int last_dc[3]; ///< last DC values for MPEG1 + int16_t *dc_val_base; + int16_t *dc_val[3]; ///< used for mpeg4 DC prediction, all 3 arrays must be continuous int16_t dc_cache[4*5]; int y_dc_scale, c_dc_scale; - uint8_t *y_dc_scale_table; ///< qscale -> y_dc_scale table - uint8_t *c_dc_scale_table; ///< qscale -> c_dc_scale table + const uint8_t *y_dc_scale_table; ///< qscale -> y_dc_scale table + const uint8_t *c_dc_scale_table; ///< qscale -> c_dc_scale table const uint8_t *chroma_qscale_table; ///< qscale -> chroma_qscale (h263) + uint8_t *coded_block_base; uint8_t *coded_block; ///< used for coded block pattern prediction (msmpeg4v3, wmv1) - int16_t (*ac_val[3])[16]; ///< used for for mpeg4 AC prediction, all 3 arrays must be continuous + int16_t (*ac_val_base)[16]; + int16_t (*ac_val[3])[16]; ///< used for for mpeg4 AC prediction, all 3 arrays must be continuous int ac_pred; - uint8_t *prev_pict_types; ///< previous picture types in bitstream order, used for mb skip + uint8_t *prev_pict_types; ///< previous picture types in bitstream order, used for mb skip #define PREV_PICT_TYPES_BUFFER_SIZE 256 - int mb_skiped; ///< MUST BE SET only during DECODING - uint8_t *mbskip_table; /**< used to avoid copy if macroblock skipped (for black regions for example) + int mb_skipped; ///< MUST BE SET only during DECODING + uint8_t *mbskip_table; /**< used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encoding & decoding (contains skip table of next P Frame) */ - uint8_t *mbintra_table; ///< used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding - uint8_t *cbp_table; ///< used to store cbp, ac_pred for partitioned decoding - uint8_t *pred_dir_table; ///< used to store pred_dir for partitioned decoding + uint8_t *mbintra_table; ///< used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding + uint8_t *cbp_table; ///< used to store cbp, ac_pred for partitioned decoding + uint8_t *pred_dir_table; ///< used to store pred_dir for partitioned decoding uint8_t *allocated_edge_emu_buffer; - uint8_t *edge_emu_buffer; ///< points into the middle of allocated_edge_emu_buffer + uint8_t *edge_emu_buffer; ///< points into the middle of allocated_edge_emu_buffer + uint8_t *rd_scratchpad; ///< scratchpad for rate distortion mb decision + uint8_t *obmc_scratchpad; + uint8_t *b_scratchpad; ///< scratchpad used for writing into write only buffers - int qscale; ///< QP - int chroma_qscale; ///< chroma QP - int lambda; ///< lagrange multipler used in rate distortion - int lambda2; ///< (lambda*lambda) >> FF_LAMBDA_SHIFT + int qscale; ///< QP + int chroma_qscale; ///< chroma QP + unsigned int lambda; ///< lagrange multipler used in rate distortion + unsigned int lambda2; ///< (lambda*lambda) >> FF_LAMBDA_SHIFT int *lambda_table; - int adaptive_quant; ///< use adaptive quantization - int dquant; ///< qscale difference to prev qscale - int pict_type; ///< I_TYPE, P_TYPE, B_TYPE, ... 
- int last_pict_type; - int last_non_b_pict_type; ///< used for mpeg4 gmc b-frames & ratecontrol + int adaptive_quant; ///< use adaptive quantization + int dquant; ///< qscale difference to prev qscale + int pict_type; ///< FF_I_TYPE, FF_P_TYPE, FF_B_TYPE, ... + int last_pict_type; //FIXME removes + int last_non_b_pict_type; ///< used for mpeg4 gmc b-frames & ratecontrol + int dropable; int frame_rate_index; - /* motion compensation */ - int unrestricted_mv; ///< mv can point outside of the coded picture - int h263_long_vectors; ///< use horrible h263v1 long vector mode - int decode; ///< if 0 then decoding will be skiped (for encoding b frames for example) + int last_lambda_for[5]; ///< last lambda for a specific pict type + int skipdct; ///< skip dct and code zero residual - DSPContext dsp; ///< pointers for accelerated dsp fucntions - int f_code; ///< forward MV resolution - int b_code; ///< backward MV resolution for B Frames (mpeg4) + /* motion compensation */ + int unrestricted_mv; ///< mv can point outside of the coded picture + int h263_long_vectors; ///< use horrible h263v1 long vector mode + int decode; ///< if 0 then decoding will be skipped (for encoding b frames for example) + + DSPContext dsp; ///< pointers for accelerated dsp functions + int f_code; ///< forward MV resolution + int b_code; ///< backward MV resolution for B Frames (mpeg4) int16_t (*p_mv_table_base)[2]; int16_t (*b_forw_mv_table_base)[2]; int16_t (*b_back_mv_table_base)[2]; - int16_t (*b_bidir_forw_mv_table_base)[2]; - int16_t (*b_bidir_back_mv_table_base)[2]; + int16_t (*b_bidir_forw_mv_table_base)[2]; + int16_t (*b_bidir_back_mv_table_base)[2]; int16_t (*b_direct_mv_table_base)[2]; int16_t (*p_field_mv_table_base[2][2])[2]; int16_t (*b_field_mv_table_base[2][2][2])[2]; - int16_t (*p_mv_table)[2]; ///< MV table (1MV per MB) p-frame encoding - int16_t (*b_forw_mv_table)[2]; ///< MV table (1MV per MB) forward mode b-frame encoding - int16_t (*b_back_mv_table)[2]; ///< MV table (1MV per MB) backward mode b-frame encoding - int16_t (*b_bidir_forw_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding - int16_t (*b_bidir_back_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding - int16_t (*b_direct_mv_table)[2]; ///< MV table (1MV per MB) direct mode b-frame encoding + int16_t (*p_mv_table)[2]; ///< MV table (1MV per MB) p-frame encoding + int16_t (*b_forw_mv_table)[2]; ///< MV table (1MV per MB) forward mode b-frame encoding + int16_t (*b_back_mv_table)[2]; ///< MV table (1MV per MB) backward mode b-frame encoding + int16_t (*b_bidir_forw_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding + int16_t (*b_bidir_back_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding + int16_t (*b_direct_mv_table)[2]; ///< MV table (1MV per MB) direct mode b-frame encoding int16_t (*p_field_mv_table[2][2])[2]; ///< MV table (2MV per MB) interlaced p-frame encoding int16_t (*b_field_mv_table[2][2][2])[2];///< MV table (4MV per MB) interlaced b-frame encoding uint8_t (*p_field_select_table[2]); uint8_t (*b_field_select_table[2][2]); - int me_method; ///< ME algorithm - int scene_change_score; + int me_method; ///< ME algorithm int mv_dir; -#define MV_DIR_BACKWARD 1 -#define MV_DIR_FORWARD 2 +#define MV_DIR_FORWARD 1 +#define MV_DIR_BACKWARD 2 #define MV_DIRECT 4 ///< bidirectional mode where the difference equals the MV of the last P/S/I-Frame (mpeg4) int mv_type; -#define MV_TYPE_16X16 0 ///< 1 vector for the whole mb -#define MV_TYPE_8X8 1 ///< 4 vectors (h263, mpeg4 
4MV) -#define MV_TYPE_16X8 2 ///< 2 vectors, one per 16x8 block -#define MV_TYPE_FIELD 3 ///< 2 vectors, one per field -#define MV_TYPE_DMV 4 ///< 2 vectors, special mpeg2 Dual Prime Vectors - /**motion vectors for a macroblock +#define MV_TYPE_16X16 0 ///< 1 vector for the whole mb +#define MV_TYPE_8X8 1 ///< 4 vectors (h263, mpeg4 4MV) +#define MV_TYPE_16X8 2 ///< 2 vectors, one per 16x8 block +#define MV_TYPE_FIELD 3 ///< 2 vectors, one per field +#define MV_TYPE_DMV 4 ///< 2 vectors, special mpeg2 Dual Prime Vectors + /**motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend on type third " : 0 = x, 1 = y */ int mv[2][4][2]; int field_select[2][2]; - int last_mv[2][2][2]; ///< last MV, used for MV prediction in MPEG1 & B-frame MPEG4 - uint8_t *fcode_tab; ///< smallest fcode needed for each MV - + int last_mv[2][2][2]; ///< last MV, used for MV prediction in MPEG1 & B-frame MPEG4 + uint8_t *fcode_tab; ///< smallest fcode needed for each MV + int16_t direct_scale_mv[2][64]; ///< precomputed to avoid divisions in ff_mpeg4_set_direct_mv + MotionEstContext me; - int no_rounding; /**< apply no rounding to motion compensation (MPEG4, msmpeg4, ...) - for b-frames rounding mode is allways 0 */ + int no_rounding; /**< apply no rounding to motion compensation (MPEG4, msmpeg4, ...) + for b-frames rounding mode is always 0 */ - int hurry_up; /**< when set to 1 during decoding, b frames will be skiped + int hurry_up; /**< when set to 1 during decoding, b frames will be skipped when set to 2 idct/dequant will be skipped too */ - + /* macroblock layer */ int mb_x, mb_y; int mb_skip_run; @@ -409,7 +381,7 @@ typedef struct MpegEncContext { #define CANDIDATE_MB_TYPE_INTRA 0x01 #define CANDIDATE_MB_TYPE_INTER 0x02 #define CANDIDATE_MB_TYPE_INTER4V 0x04 -#define CANDIDATE_MB_TYPE_SKIPED 0x08 +#define CANDIDATE_MB_TYPE_SKIPPED 0x08 //#define MB_TYPE_GMC 0x10 #define CANDIDATE_MB_TYPE_DIRECT 0x10 @@ -422,10 +394,12 @@ typedef struct MpegEncContext { #define CANDIDATE_MB_TYPE_BACKWARD_I 0x400 #define CANDIDATE_MB_TYPE_BIDIR_I 0x800 +#define CANDIDATE_MB_TYPE_DIRECT0 0x1000 + int block_index[6]; ///< index to current MB in block based arrays with edges int block_wrap[6]; uint8_t *dest[3]; - + int *mb_index2xy; ///< mb_index -> mb_x + mb_y*mb_stride /** matrix transmitted in the bitstream */ @@ -434,11 +408,11 @@ typedef struct MpegEncContext { uint16_t inter_matrix[64]; uint16_t chroma_inter_matrix[64]; #define QUANT_BIAS_SHIFT 8 - int intra_quant_bias; ///< bias for the quantizer - int inter_quant_bias; ///< bias for the quantizer - int min_qcoeff; ///< minimum encodable coefficient - int max_qcoeff; ///< maximum encodable coefficient - int ac_esc_length; ///< num of bits needed to encode the longest esc + int intra_quant_bias; ///< bias for the quantizer + int inter_quant_bias; ///< bias for the quantizer + int min_qcoeff; ///< minimum encodable coefficient + int max_qcoeff; ///< maximum encodable coefficient + int ac_esc_length; ///< num of bits needed to encode the longest esc uint8_t *intra_ac_vlc_length; uint8_t *intra_ac_vlc_last_length; uint8_t *inter_ac_vlc_length; @@ -447,7 +421,7 @@ typedef struct MpegEncContext { uint8_t *chroma_dc_vlc_length; #define UNI_AC_ENC_INDEX(run,level) ((run)*128 + (level)) - int coded_score[6]; + int coded_score[8]; /** precomputed matrix (combine qscale and DCT renorm) */ int (*q_intra_matrix)[64]; @@ -455,13 +429,13 @@ typedef struct MpegEncContext { /** identical to the above but for MMX & these are not permutated, second 64 
entries are bias*/ uint16_t (*q_intra_matrix16)[2][64]; uint16_t (*q_inter_matrix16)[2][64]; - int block_last_index[6]; ///< last non zero coefficient in block + int block_last_index[12]; ///< last non zero coefficient in block /* scantables */ - ScanTable __align8 intra_scantable; + DECLARE_ALIGNED_8(ScanTable, intra_scantable); ScanTable intra_h_scantable; ScanTable intra_v_scantable; ScanTable inter_scantable; ///< if inter == intra then intra should be used to reduce tha cache usage - + /* noise reduction */ int (*dct_error_sum)[64]; int dct_count[2]; @@ -472,7 +446,8 @@ typedef struct MpegEncContext { /* bit rate control */ int64_t wanted_bits; int64_t total_bits; - int frame_bits; ///< bits used for the current frame + int frame_bits; ///< bits used for the current frame + int next_lambda; ///< next lambda used for retrying to encode a frame RateControlContext rc_context; ///< contains stuff only accessed in ratecontrol.c /* statistics, used for 2-pass encoding */ @@ -486,11 +461,11 @@ typedef struct MpegEncContext { int skip_count; int misc_bits; ///< cbp, mb_type int last_bits; ///< temp var used for calculating the above vars - + /* error concealment / resync */ int error_count; - uint8_t *error_status_table; ///< table of the error status of each MB -#define VP_START 1 ///< current MB is the first after a resync marker + uint8_t *error_status_table; ///< table of the error status of each MB +#define VP_START 1 ///< current MB is the first after a resync marker #define AC_ERROR 2 #define DC_ERROR 4 #define MV_ERROR 8 @@ -498,40 +473,40 @@ typedef struct MpegEncContext { #define DC_END 32 #define MV_END 64 //FIXME some prefix? - - int resync_mb_x; ///< x position of last resync marker - int resync_mb_y; ///< y position of last resync marker - GetBitContext last_resync_gb; ///< used to search for the next resync marker + + int resync_mb_x; ///< x position of last resync marker + int resync_mb_y; ///< y position of last resync marker + GetBitContext last_resync_gb; ///< used to search for the next resync marker int mb_num_left; ///< number of MBs left in this video packet (for partitioned Slices only) - int next_p_frame_damaged; ///< set if the next p frame is damaged, to avoid showing trashed b frames + int next_p_frame_damaged; ///< set if the next p frame is damaged, to avoid showing trashed b frames int error_resilience; - + ParseContext parse_context; /* H.263 specific */ int gob_index; int obmc; ///< overlapped block motion compensation - + /* H.263+ specific */ - int umvplus; ///< == H263+ && unrestricted_mv - int h263_aic; ///< Advanded INTRA Coding (AIC) + int umvplus; ///< == H263+ && unrestricted_mv + int h263_aic; ///< Advanded INTRA Coding (AIC) int h263_aic_dir; ///< AIC direction: 0 = left, 1 = top int h263_slice_structured; int alt_inter_vlc; ///< alternative inter vlc int modified_quant; - int loop_filter; - + int loop_filter; + int custom_pcf; + /* mpeg4 specific */ - int time_increment_resolution; - int time_increment_bits; ///< number of bits to represent the fractional part of time + int time_increment_bits; ///< number of bits to represent the fractional part of time int last_time_base; - int time_base; ///< time in seconds of last I,P,S Frame - int64_t time; ///< time of current frame + int time_base; ///< time in seconds of last I,P,S Frame + int64_t time; ///< time of current frame int64_t last_non_b_time; - uint16_t pp_time; ///< time distance between the last 2 p,s,i frames - uint16_t pb_time; ///< time distance between the last b and p,s,i frame + uint16_t 
pp_time; ///< time distance between the last 2 p,s,i frames + uint16_t pb_time; ///< time distance between the last b and p,s,i frame uint16_t pp_field_time; - uint16_t pb_field_time; ///< like above, just for interlaced + uint16_t pb_field_time; ///< like above, just for interlaced int shape; int vol_sprite_usage; int sprite_width; @@ -541,12 +516,12 @@ typedef struct MpegEncContext { int sprite_brightness_change; int num_sprite_warping_points; int real_sprite_warping_points; - int sprite_offset[2][2]; ///< sprite offset[isChroma][isMVY] - int sprite_delta[2][2]; ///< sprite_delta [isY][isMVY] - int sprite_shift[2]; ///< sprite shift [isChroma] + int sprite_offset[2][2]; ///< sprite offset[isChroma][isMVY] + int sprite_delta[2][2]; ///< sprite_delta [isY][isMVY] + int sprite_shift[2]; ///< sprite shift [isChroma] int mcsel; int quant_precision; - int quarter_sample; ///< 1->qpel, 0->half pel ME/MC + int quarter_sample; ///< 1->qpel, 0->half pel ME/MC int scalability; int hierachy_type; int enhancement_type; @@ -555,47 +530,42 @@ typedef struct MpegEncContext { int aspect_ratio_info; //FIXME remove int sprite_warping_accuracy; int low_latency_sprite; - int data_partitioning; ///< data partitioning flag from header - int partitioned_frame; ///< is current frame partitioned - int rvlc; ///< reversible vlc + int data_partitioning; ///< data partitioning flag from header + int partitioned_frame; ///< is current frame partitioned + int rvlc; ///< reversible vlc int resync_marker; ///< could this stream contain resync markers - int low_delay; ///< no reordering needed / has no b-frames + int low_delay; ///< no reordering needed / has no b-frames int vo_type; - int vol_control_parameters; ///< does the stream contain the low_delay flag, used to workaround buggy encoders - int intra_dc_threshold; ///< QP above whch the ac VLC should be used for intra dc - PutBitContext tex_pb; ///< used for data partitioned VOPs - PutBitContext pb2; ///< used for data partitioned VOPs -#define PB_BUFFER_SIZE 1024*256 - uint8_t *tex_pb_buffer; - uint8_t *pb2_buffer; + int vol_control_parameters; ///< does the stream contain the low_delay flag, used to workaround buggy encoders + int intra_dc_threshold; ///< QP above whch the ac VLC should be used for intra dc + int use_intra_dc_vlc; + PutBitContext tex_pb; ///< used for data partitioned VOPs + PutBitContext pb2; ///< used for data partitioned VOPs int mpeg_quant; - int t_frame; ///< time distance of first I -> B, used for interlaced b frames - int padding_bug_score; ///< used to detect the VERY common padding bug in MPEG4 + int t_frame; ///< time distance of first I -> B, used for interlaced b frames + int padding_bug_score; ///< used to detect the VERY common padding bug in MPEG4 /* divx specific, used to workaround (many) bugs in divx5 */ int divx_version; int divx_build; int divx_packed; -#define BITSTREAM_BUFFER_SIZE 1024*256 uint8_t *bitstream_buffer; //Divx 5.01 puts several frames in a single one, this is used to reorder them int bitstream_buffer_size; - + unsigned int allocated_bitstream_buffer_size; + int xvid_build; - + /* lavc specific stuff, used to workaround bugs in libavcodec */ - int ffmpeg_version; int lavc_build; - + /* RV10 specific */ - int rv10_version; ///< RV10 version: 0 or 3 + int rv10_version; ///< RV10 version: 0 or 3 int rv10_first_dc_coded[3]; - + /* MJPEG specific */ struct MJpegContext *mjpeg_ctx; - int mjpeg_vsample[3]; ///< vertical sampling factors, default = {2, 1, 1} - int mjpeg_hsample[3]; ///< horizontal sampling factors, 
default = {2, 1, 1} - int mjpeg_write_tables; ///< do we want to have quantisation- and huffmantables in the jpeg file ? - int mjpeg_data_only_frames; ///< frames only with SOI, SOS and EOI markers + int mjpeg_vsample[3]; ///< vertical sampling factors, default = {2, 1, 1} + int mjpeg_hsample[3]; ///< horizontal sampling factors, default = {2, 1, 1} /* MSMPEG4 specific */ int mv_table_index; @@ -603,8 +573,8 @@ typedef struct MpegEncContext { int rl_chroma_table_index; int dc_table_index; int use_skip_mb_code; - int slice_height; ///< in macroblocks - int first_slice_line; ///< used in mpeg4 too to handle resync markers + int slice_height; ///< in macroblocks + int first_slice_line; ///< used in mpeg4 too to handle resync markers int flipflop_rounding; int msmpeg4_version; ///< 0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8 int per_mb_rl_table; @@ -619,11 +589,11 @@ typedef struct MpegEncContext { GetBitContext gb; /* Mpeg1 specific */ - int gop_picture_number; ///< index of the first picture of a GOP based on fake_pic_num & mpeg1 specific - int last_mv_dir; ///< last mv_dir, used for b frame encoding + int gop_picture_number; ///< index of the first picture of a GOP based on fake_pic_num & mpeg1 specific + int last_mv_dir; ///< last mv_dir, used for b frame encoding int broken_link; ///< no_output_of_prior_pics_flag - uint8_t *vbv_delay_ptr; ///< pointer to vbv_delay in the bitstream - + uint8_t *vbv_delay_ptr; ///< pointer to vbv_delay in the bitstream + /* MPEG2 specific - I wish I had not to support this mess. */ int progressive_sequence; int mpeg_f_code[2][2]; @@ -642,6 +612,13 @@ typedef struct MpegEncContext { int alternate_scan; int repeat_first_field; int chroma_420_type; + int chroma_format; +#define CHROMA_420 1 +#define CHROMA_422 2 +#define CHROMA_444 3 + int chroma_x_shift;//depend on pix_format, that depend on chroma_format + int chroma_y_shift; + int progressive_frame; int full_pel[2]; int interlaced_dct; @@ -650,30 +627,34 @@ typedef struct MpegEncContext { /* RTP specific */ int rtp_mode; - + uint8_t *ptr_lastgob; int swap_uv;//vcr2 codec is mpeg2 varint with UV swaped short * pblocks[12]; - - DCTELEM (*block)[64]; ///< points to one of the following blocks - DCTELEM (*blocks)[6][64]; // for HQ mode we need to keep the best block + + DCTELEM (*block)[64]; ///< points to one of the following blocks + DCTELEM (*blocks)[8][64]; // for HQ mode we need to keep the best block int (*decode_mb)(struct MpegEncContext *s, DCTELEM block[6][64]); // used by some codecs to avoid a switch() #define SLICE_OK 0 #define SLICE_ERROR -1 #define SLICE_END -2 ///>s->avctx->lowres; + s->block_index[0]+=2; s->block_index[1]+=2; s->block_index[2]+=2; s->block_index[3]+=2; s->block_index[4]++; s->block_index[5]++; - s->dest[0]+= 16; - s->dest[1]+= 8; - s->dest[2]+= 8; + s->dest[0]+= 2*block_size; + s->dest[1]+= block_size; + s->dest[2]+= block_size; } static inline int get_bits_diff(MpegEncContext *s){ - const int bits= get_bit_count(&s->pb); + const int bits= put_bits_count(&s->pb); const int last= s->last_bits; s->last_bits = bits; @@ -757,6 +724,14 @@ static inline int get_bits_diff(MpegEncContext *s){ return bits - last; } +static inline int ff_h263_round_chroma(int x){ + static const uint8_t h263_chroma_roundtab[16] = { + // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 + 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, + }; + return h263_chroma_roundtab[x & 0xf] + (x >> 3); +} + /* motion_est.c */ void ff_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y); @@ -766,14 
+741,16 @@ int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type); void ff_fix_long_p_mvs(MpegEncContext * s); void ff_fix_long_mvs(MpegEncContext * s, uint8_t *field_select_table, int field_select, int16_t (*mv_table)[2], int f_code, int type, int truncate); -void ff_init_me(MpegEncContext *s); +int ff_init_me(MpegEncContext *s); int ff_pre_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y); - +int ff_epzs_motion_search(MpegEncContext * s, int *mx_ptr, int *my_ptr, + int P[10][2], int src_index, int ref_index, int16_t (*last_mv)[2], + int ref_mv_scale, int size, int h); +int ff_get_mb_score(MpegEncContext * s, int mx, int my, int src_index, + int ref_index, int size, int h, int add_rate); /* mpeg12.c */ -extern const int16_t ff_mpeg1_default_intra_matrix[64]; -extern const int16_t ff_mpeg1_default_non_intra_matrix[64]; -extern uint8_t ff_mpeg1_dc_scale_table[128]; +extern const uint8_t ff_mpeg1_dc_scale_table[128]; void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number); void mpeg1_encode_mb(MpegEncContext *s, @@ -782,64 +759,47 @@ void mpeg1_encode_mb(MpegEncContext *s, void ff_mpeg1_encode_init(MpegEncContext *s); void ff_mpeg1_encode_slice_header(MpegEncContext *s); void ff_mpeg1_clean_buffers(MpegEncContext *s); +int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size); - -/** RLTable. */ -typedef struct RLTable { - int n; ///< number of entries of table_vlc minus 1 - int last; ///< number of values for last = 0 - const uint16_t (*table_vlc)[2]; - const int8_t *table_run; - const int8_t *table_level; - uint8_t *index_run[2]; ///< encoding only - int8_t *max_level[2]; ///< encoding & decoding - int8_t *max_run[2]; ///< encoding & decoding - VLC vlc; ///< decoding only deprected FIXME remove - RL_VLC_ELEM *rl_vlc[32]; ///< decoding only -} RLTable; - -void init_rl(RLTable *rl); -void init_vlc_rl(RLTable *rl); - -static inline int get_rl_index(const RLTable *rl, int last, int run, int level) -{ - int index; - index = rl->index_run[last][run]; - if (index >= rl->n) - return rl->n; - if (level > rl->max_level[last][run]) - return rl->n; - return index + level - 1; -} - -extern uint8_t ff_mpeg4_y_dc_scale_table[32]; -extern uint8_t ff_mpeg4_c_dc_scale_table[32]; -extern uint8_t ff_aic_dc_scale_table[32]; +extern const uint8_t ff_mpeg4_y_dc_scale_table[32]; +extern const uint8_t ff_mpeg4_c_dc_scale_table[32]; +extern const uint8_t ff_aic_dc_scale_table[32]; extern const int16_t ff_mpeg4_default_intra_matrix[64]; extern const int16_t ff_mpeg4_default_non_intra_matrix[64]; extern const uint8_t ff_h263_chroma_qscale_table[32]; extern const uint8_t ff_h263_loop_filter_strength[32]; - -int ff_h263_decode_init(AVCodecContext *avctx); -int ff_h263_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - uint8_t *buf, int buf_size); -int ff_h263_decode_end(AVCodecContext *avctx); -void h263_encode_mb(MpegEncContext *s, +/* h261.c */ +void ff_h261_loop_filter(MpegEncContext *s); +void ff_h261_reorder_mb_index(MpegEncContext* s); +void ff_h261_encode_mb(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y); -void mpeg4_encode_mb(MpegEncContext *s, +void ff_h261_encode_picture_header(MpegEncContext * s, int picture_number); +void ff_h261_encode_init(MpegEncContext *s); +int ff_h261_get_picture_format(int width, int height); + + +/* h263.c, h263dec.c */ +int ff_h263_decode_init(AVCodecContext *avctx); +int ff_h263_decode_frame(AVCodecContext *avctx, + void *data, int *data_size, + const uint8_t *buf, int 
buf_size); +int ff_h263_decode_end(AVCodecContext *avctx); +void h263_encode_mb(MpegEncContext *s, + DCTELEM block[6][64], + int motion_x, int motion_y); +void mpeg4_encode_mb(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y); void h263_encode_picture_header(MpegEncContext *s, int picture_number); void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number); void h263_encode_gob_header(MpegEncContext * s, int mb_line); -int16_t *h263_pred_motion(MpegEncContext * s, int block, +int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir, int *px, int *py); -void mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n, +void mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n, int dir); -void ff_set_mpeg4_time(MpegEncContext * s, int picture_number); +void ff_set_mpeg4_time(MpegEncContext * s); void mpeg4_encode_picture_header(MpegEncContext *s, int picture_number); void h263_encode_init(MpegEncContext *s); void h263_decode_init_vlc(MpegEncContext *s); @@ -870,19 +830,21 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s); int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s); int ff_h263_resync(MpegEncContext *s); int ff_h263_get_gob_height(MpegEncContext *s); +void ff_mpeg4_init_direct_mv(MpegEncContext *s); int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my); -inline int ff_h263_round_chroma(int x); +void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code); /* rv10.c */ void rv10_encode_picture_header(MpegEncContext *s, int picture_number); int rv_decode_dc(MpegEncContext *s, int n); +void rv20_encode_picture_header(MpegEncContext *s, int picture_number); /* msmpeg4.c */ void msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number); void msmpeg4_encode_ext_header(MpegEncContext * s); -void msmpeg4_encode_mb(MpegEncContext * s, +void msmpeg4_encode_mb(MpegEncContext * s, DCTELEM block[6][64], int motion_x, int motion_y); int msmpeg4_decode_picture_header(MpegEncContext * s); @@ -897,29 +859,9 @@ void ff_mspel_motion(MpegEncContext *s, uint8_t **ref_picture, op_pixels_func (*pix_op)[4], int motion_x, int motion_y, int h); int ff_wmv2_encode_picture_header(MpegEncContext * s, int picture_number); -void ff_wmv2_encode_mb(MpegEncContext * s, +void ff_wmv2_encode_mb(MpegEncContext * s, DCTELEM block[6][64], int motion_x, int motion_y); -/* mjpeg.c */ -int mjpeg_init(MpegEncContext *s); -void mjpeg_close(MpegEncContext *s); -void mjpeg_encode_mb(MpegEncContext *s, - DCTELEM block[6][64]); -void mjpeg_picture_header(MpegEncContext *s); -void mjpeg_picture_trailer(MpegEncContext *s); +#endif /* FFMPEG_MPEGVIDEO_H */ - -/* rate control */ -int ff_rate_control_init(MpegEncContext *s); -float ff_rate_estimate_qscale(MpegEncContext *s); -void ff_write_pass1_stats(MpegEncContext *s); -void ff_rate_control_uninit(MpegEncContext *s); -double ff_eval(char *s, double *const_value, const char **const_name, - double (**func1)(void *, double), const char **func1_name, - double (**func2)(void *, double, double), char **func2_name, - void *opaque); -int ff_vbv_update(MpegEncContext *s, int frame_size); - - -#endif /* AVCODEC_MPEGVIDEO_H */ diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo_common.h b/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo_common.h new file mode 100644 index 0000000000..4f3396f589 --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo_common.h @@ -0,0 +1,901 @@ +/* + * The simplest mpeg encoder (well, it was the simplest!) 
+ * Copyright (c) 2000,2001 Fabrice Bellard.
+ * Copyright (c) 2002-2004 Michael Niedermayer
+ *
+ * 4MV & hq & B-frame encoding stuff by Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file mpegvideo_common.h
+ * The simplest mpeg encoder (well, it was the simplest!).
+ */
+
+#ifndef FFMPEG_MPEGVIDEO_COMMON_H
+#define FFMPEG_MPEGVIDEO_COMMON_H
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+#include "mjpegenc.h"
+#include "msmpeg4.h"
+#include "faandct.h"
+#include <limits.h>
+
+int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
+int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
+void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
+void copy_picture(Picture *dst, Picture *src);
+
+/**
+ * allocates a Picture
+ * The pixels are allocated/set by calling get_buffer() if shared=0
+ */
+int alloc_picture(MpegEncContext *s, Picture *pic, int shared);
+
+/**
+ * sets the given MpegEncContext to common defaults (same for encoding and decoding).
+ * the changed fields will not depend upon the prior state of the MpegEncContext.
+ */ +void MPV_common_defaults(MpegEncContext *s); + +static inline void gmc1_motion(MpegEncContext *s, + uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, + uint8_t **ref_picture) +{ + uint8_t *ptr; + int offset, src_x, src_y, linesize, uvlinesize; + int motion_x, motion_y; + int emu=0; + + motion_x= s->sprite_offset[0][0]; + motion_y= s->sprite_offset[0][1]; + src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1)); + src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1)); + motion_x<<=(3-s->sprite_warping_accuracy); + motion_y<<=(3-s->sprite_warping_accuracy); + src_x = av_clip(src_x, -16, s->width); + if (src_x == s->width) + motion_x =0; + src_y = av_clip(src_y, -16, s->height); + if (src_y == s->height) + motion_y =0; + + linesize = s->linesize; + uvlinesize = s->uvlinesize; + + ptr = ref_picture[0] + (src_y * linesize) + src_x; + + if(s->flags&CODEC_FLAG_EMU_EDGE){ + if( (unsigned)src_x >= s->h_edge_pos - 17 + || (unsigned)src_y >= s->v_edge_pos - 17){ + ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos); + ptr= s->edge_emu_buffer; + } + } + + if((motion_x|motion_y)&7){ + s->dsp.gmc1(dest_y , ptr , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding); + s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding); + }else{ + int dxy; + + dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2); + if (s->no_rounding){ + s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16); + }else{ + s->dsp.put_pixels_tab [0][dxy](dest_y, ptr, linesize, 16); + } + } + + if(ENABLE_GRAY && s->flags&CODEC_FLAG_GRAY) return; + + motion_x= s->sprite_offset[1][0]; + motion_y= s->sprite_offset[1][1]; + src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1)); + src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1)); + motion_x<<=(3-s->sprite_warping_accuracy); + motion_y<<=(3-s->sprite_warping_accuracy); + src_x = av_clip(src_x, -8, s->width>>1); + if (src_x == s->width>>1) + motion_x =0; + src_y = av_clip(src_y, -8, s->height>>1); + if (src_y == s->height>>1) + motion_y =0; + + offset = (src_y * uvlinesize) + src_x; + ptr = ref_picture[1] + offset; + if(s->flags&CODEC_FLAG_EMU_EDGE){ + if( (unsigned)src_x >= (s->h_edge_pos>>1) - 9 + || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){ + ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); + ptr= s->edge_emu_buffer; + emu=1; + } + } + s->dsp.gmc1(dest_cb, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding); + + ptr = ref_picture[2] + offset; + if(emu){ + ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); + ptr= s->edge_emu_buffer; + } + s->dsp.gmc1(dest_cr, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding); + + return; +} + +static inline void gmc_motion(MpegEncContext *s, + uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, + uint8_t **ref_picture) +{ + uint8_t *ptr; + int linesize, uvlinesize; + const int a= s->sprite_warping_accuracy; + int ox, oy; + + linesize = s->linesize; + uvlinesize = s->uvlinesize; + + ptr = ref_picture[0]; + + ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16; + oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16; + + s->dsp.gmc(dest_y, ptr, linesize, 16, + ox, + oy, + s->sprite_delta[0][0], s->sprite_delta[0][1], + s->sprite_delta[1][0], 
s->sprite_delta[1][1],
+               a+1, (1<<(2*a+1)) - s->no_rounding,
+               s->h_edge_pos, s->v_edge_pos);
+    s->dsp.gmc(dest_y+8, ptr, linesize, 16,
+           ox + s->sprite_delta[0][0]*8,
+           oy + s->sprite_delta[1][0]*8,
+           s->sprite_delta[0][0], s->sprite_delta[0][1],
+           s->sprite_delta[1][0], s->sprite_delta[1][1],
+           a+1, (1<<(2*a+1)) - s->no_rounding,
+           s->h_edge_pos, s->v_edge_pos);
+
+    if(ENABLE_GRAY && s->flags&CODEC_FLAG_GRAY) return;
+
+    ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
+    oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
+
+    ptr = ref_picture[1];
+    s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
+           ox,
+           oy,
+           s->sprite_delta[0][0], s->sprite_delta[0][1],
+           s->sprite_delta[1][0], s->sprite_delta[1][1],
+           a+1, (1<<(2*a+1)) - s->no_rounding,
+           s->h_edge_pos>>1, s->v_edge_pos>>1);
+
+    ptr = ref_picture[2];
+    s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
+           ox,
+           oy,
+           s->sprite_delta[0][0], s->sprite_delta[0][1],
+           s->sprite_delta[1][0], s->sprite_delta[1][1],
+           a+1, (1<<(2*a+1)) - s->no_rounding,
+           s->h_edge_pos>>1, s->v_edge_pos>>1);
+}
+
+static inline int hpel_motion(MpegEncContext *s,
+                                  uint8_t *dest, uint8_t *src,
+                                  int field_based, int field_select,
+                                  int src_x, int src_y,
+                                  int width, int height, int stride,
+                                  int h_edge_pos, int v_edge_pos,
+                                  int w, int h, op_pixels_func *pix_op,
+                                  int motion_x, int motion_y)
+{
+    int dxy;
+    int emu=0;
+
+    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
+    src_x += motion_x >> 1;
+    src_y += motion_y >> 1;
+
+    /* WARNING: do no forget half pels */
+    src_x = av_clip(src_x, -16, width); //FIXME unneeded for emu?
+    if (src_x == width)
+        dxy &= ~1;
+    src_y = av_clip(src_y, -16, height);
+    if (src_y == height)
+        dxy &= ~2;
+    src += src_y * stride + src_x;
+
+    if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
+        if(   (unsigned)src_x > h_edge_pos - (motion_x&1) - w
+           || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
+            ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
+                             src_x, src_y<<field_based, h_edge_pos, s->v_edge_pos);
+            src= s->edge_emu_buffer;
+            emu=1;
+        }
+    }
+    if(field_select)
+        src += s->linesize;
+    pix_op[dxy](dest, src, stride, h);
+    return emu;
+}
+
+static av_always_inline
+void mpeg_motion_internal(MpegEncContext *s,
+                 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+                 int field_based, int bottom_field, int field_select,
+                 uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
+                 int motion_x, int motion_y, int h, int is_mpeg12)
+{
+    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
+    int dxy, uvdxy, mx, my, src_x, src_y,
+        uvsrc_x, uvsrc_y, v_edge_pos, uvlinesize, linesize;
+
+#if 0
+if(s->quarter_sample)
+{
+    motion_x>>=1;
+    motion_y>>=1;
+}
+#endif
+
+    v_edge_pos = s->v_edge_pos >> field_based;
+    linesize   = s->current_picture.linesize[0] << field_based;
+    uvlinesize = s->current_picture.linesize[1] << field_based;
+
+    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
+    src_x = s->mb_x* 16               + (motion_x >> 1);
+    src_y =(s->mb_y<<(4-field_based)) + (motion_y >> 1);
+
+    if (!is_mpeg12 && s->out_format == FMT_H263) {
+        if((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based){
+            mx = (motion_x>>1)|(motion_x&1);
+            my = motion_y >>1;
+            uvdxy = ((my & 1) << 1) | (mx & 1);
+            uvsrc_x = s->mb_x* 8               + (mx >> 1);
+            uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1);
+        }else{
+            uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
+            uvsrc_x = src_x>>1;
+            uvsrc_y = src_y>>1;
+        }
+    }else if(!is_mpeg12 && s->out_format == FMT_H261){//even chroma mv's are full pel in H261
+        mx = motion_x / 4;
+        my = motion_y / 4;
+        uvdxy = 0;
= 0; + uvsrc_x = s->mb_x*8 + mx; + uvsrc_y = s->mb_y*8 + my; + } else { + if(s->chroma_y_shift){ + mx = motion_x / 2; + my = motion_y / 2; + uvdxy = ((my & 1) << 1) | (mx & 1); + uvsrc_x = s->mb_x* 8 + (mx >> 1); + uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1); + } else { + if(s->chroma_x_shift){ + //Chroma422 + mx = motion_x / 2; + uvdxy = ((motion_y & 1) << 1) | (mx & 1); + uvsrc_x = s->mb_x* 8 + (mx >> 1); + uvsrc_y = src_y; + } else { + //Chroma444 + uvdxy = dxy; + uvsrc_x = src_x; + uvsrc_y = src_y; + } + } + } + + ptr_y = ref_picture[0] + src_y * linesize + src_x; + ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x; + ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x; + + if( (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16 + || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){ + if(is_mpeg12 || s->codec_id == CODEC_ID_MPEG2VIDEO || + s->codec_id == CODEC_ID_MPEG1VIDEO){ + av_log(s->avctx,AV_LOG_DEBUG, + "MPEG motion vector out of boundary\n"); + return ; + } + ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, + 17, 17+field_based, + src_x, src_y<h_edge_pos, s->v_edge_pos); + ptr_y = s->edge_emu_buffer; + if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ + uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize; + ff_emulated_edge_mc(uvbuf , + ptr_cb, s->uvlinesize, + 9, 9+field_based, + uvsrc_x, uvsrc_y<h_edge_pos>>1, s->v_edge_pos>>1); + ff_emulated_edge_mc(uvbuf+16, + ptr_cr, s->uvlinesize, + 9, 9+field_based, + uvsrc_x, uvsrc_y<h_edge_pos>>1, s->v_edge_pos>>1); + ptr_cb= uvbuf; + ptr_cr= uvbuf+16; + } + } + + if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data + dest_y += s->linesize; + dest_cb+= s->uvlinesize; + dest_cr+= s->uvlinesize; + } + + if(field_select){ + ptr_y += s->linesize; + ptr_cb+= s->uvlinesize; + ptr_cr+= s->uvlinesize; + } + + pix_op[0][dxy](dest_y, ptr_y, linesize, h); + + if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ + pix_op[s->chroma_x_shift][uvdxy] + (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift); + pix_op[s->chroma_x_shift][uvdxy] + (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift); + } + if(!is_mpeg12 && (ENABLE_H261_ENCODER || ENABLE_H261_DECODER) && + s->out_format == FMT_H261){ + ff_h261_loop_filter(s); + } +} +/* apply one mpeg motion vector to the three components */ +static av_always_inline +void mpeg_motion(MpegEncContext *s, + uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, + int field_based, int bottom_field, int field_select, + uint8_t **ref_picture, op_pixels_func (*pix_op)[4], + int motion_x, int motion_y, int h) +{ +#ifndef CONFIG_SMALL + if(s->out_format == FMT_MPEG1) + mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, field_based, + bottom_field, field_select, ref_picture, pix_op, + motion_x, motion_y, h, 1); + else +#endif + mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, field_based, + bottom_field, field_select, ref_picture, pix_op, + motion_x, motion_y, h, 0); +} + +//FIXME move to dsputil, avg variant, 16x16 version +static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){ + int x; + uint8_t * const top = src[1]; + uint8_t * const left = src[2]; + uint8_t * const mid = src[0]; + uint8_t * const right = src[3]; + uint8_t * const bottom= src[4]; +#define OBMC_FILTER(x, t, l, m, r, b)\ + dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3 +#define OBMC_FILTER4(x, t, l, m, r, b)\ + OBMC_FILTER(x , t, l, m, r, b);\ + OBMC_FILTER(x+1 , t, l, m, r, b);\ + OBMC_FILTER(x +stride, t, l, m, r, b);\ + 
OBMC_FILTER(x+1+stride, t, l, m, r, b); + + x=0; + OBMC_FILTER (x , 2, 2, 4, 0, 0); + OBMC_FILTER (x+1, 2, 1, 5, 0, 0); + OBMC_FILTER4(x+2, 2, 1, 5, 0, 0); + OBMC_FILTER4(x+4, 2, 0, 5, 1, 0); + OBMC_FILTER (x+6, 2, 0, 5, 1, 0); + OBMC_FILTER (x+7, 2, 0, 4, 2, 0); + x+= stride; + OBMC_FILTER (x , 1, 2, 5, 0, 0); + OBMC_FILTER (x+1, 1, 2, 5, 0, 0); + OBMC_FILTER (x+6, 1, 0, 5, 2, 0); + OBMC_FILTER (x+7, 1, 0, 5, 2, 0); + x+= stride; + OBMC_FILTER4(x , 1, 2, 5, 0, 0); + OBMC_FILTER4(x+2, 1, 1, 6, 0, 0); + OBMC_FILTER4(x+4, 1, 0, 6, 1, 0); + OBMC_FILTER4(x+6, 1, 0, 5, 2, 0); + x+= 2*stride; + OBMC_FILTER4(x , 0, 2, 5, 0, 1); + OBMC_FILTER4(x+2, 0, 1, 6, 0, 1); + OBMC_FILTER4(x+4, 0, 0, 6, 1, 1); + OBMC_FILTER4(x+6, 0, 0, 5, 2, 1); + x+= 2*stride; + OBMC_FILTER (x , 0, 2, 5, 0, 1); + OBMC_FILTER (x+1, 0, 2, 5, 0, 1); + OBMC_FILTER4(x+2, 0, 1, 5, 0, 2); + OBMC_FILTER4(x+4, 0, 0, 5, 1, 2); + OBMC_FILTER (x+6, 0, 0, 5, 2, 1); + OBMC_FILTER (x+7, 0, 0, 5, 2, 1); + x+= stride; + OBMC_FILTER (x , 0, 2, 4, 0, 2); + OBMC_FILTER (x+1, 0, 1, 5, 0, 2); + OBMC_FILTER (x+6, 0, 0, 5, 1, 2); + OBMC_FILTER (x+7, 0, 0, 4, 2, 2); +} + +/* obmc for 1 8x8 luma block */ +static inline void obmc_motion(MpegEncContext *s, + uint8_t *dest, uint8_t *src, + int src_x, int src_y, + op_pixels_func *pix_op, + int16_t mv[5][2]/* mid top left right bottom*/) +#define MID 0 +{ + int i; + uint8_t *ptr[5]; + + assert(s->quarter_sample==0); + + for(i=0; i<5; i++){ + if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){ + ptr[i]= ptr[MID]; + }else{ + ptr[i]= s->obmc_scratchpad + 8*(i&1) + s->linesize*8*(i>>1); + hpel_motion(s, ptr[i], src, 0, 0, + src_x, src_y, + s->width, s->height, s->linesize, + s->h_edge_pos, s->v_edge_pos, + 8, 8, pix_op, + mv[i][0], mv[i][1]); + } + } + + put_obmc(dest, ptr, s->linesize); +} + +static inline void qpel_motion(MpegEncContext *s, + uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, + int field_based, int bottom_field, int field_select, + uint8_t **ref_picture, op_pixels_func (*pix_op)[4], + qpel_mc_func (*qpix_op)[16], + int motion_x, int motion_y, int h) +{ + uint8_t *ptr_y, *ptr_cb, *ptr_cr; + int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, linesize, uvlinesize; + + dxy = ((motion_y & 3) << 2) | (motion_x & 3); + src_x = s->mb_x * 16 + (motion_x >> 2); + src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2); + + v_edge_pos = s->v_edge_pos >> field_based; + linesize = s->linesize << field_based; + uvlinesize = s->uvlinesize << field_based; + + if(field_based){ + mx= motion_x/2; + my= motion_y>>1; + }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){ + static const int rtab[8]= {0,0,1,1,0,0,0,1}; + mx= (motion_x>>1) + rtab[motion_x&7]; + my= (motion_y>>1) + rtab[motion_y&7]; + }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){ + mx= (motion_x>>1)|(motion_x&1); + my= (motion_y>>1)|(motion_y&1); + }else{ + mx= motion_x/2; + my= motion_y/2; + } + mx= (mx>>1)|(mx&1); + my= (my>>1)|(my&1); + + uvdxy= (mx&1) | ((my&1)<<1); + mx>>=1; + my>>=1; + + uvsrc_x = s->mb_x * 8 + mx; + uvsrc_y = s->mb_y * (8 >> field_based) + my; + + ptr_y = ref_picture[0] + src_y * linesize + src_x; + ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x; + ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x; + + if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16 + || (unsigned)src_y > v_edge_pos - (motion_y&3) - h ){ + ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, + 17, 17+field_based, src_x, src_y<h_edge_pos, s->v_edge_pos); + ptr_y= s->edge_emu_buffer; + if(!ENABLE_GRAY || 
!(s->flags&CODEC_FLAG_GRAY)){ + uint8_t *uvbuf= s->edge_emu_buffer + 18*s->linesize; + ff_emulated_edge_mc(uvbuf, ptr_cb, s->uvlinesize, + 9, 9 + field_based, + uvsrc_x, uvsrc_y<h_edge_pos>>1, s->v_edge_pos>>1); + ff_emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, + 9, 9 + field_based, + uvsrc_x, uvsrc_y<h_edge_pos>>1, s->v_edge_pos>>1); + ptr_cb= uvbuf; + ptr_cr= uvbuf + 16; + } + } + + if(!field_based) + qpix_op[0][dxy](dest_y, ptr_y, linesize); + else{ + if(bottom_field){ + dest_y += s->linesize; + dest_cb+= s->uvlinesize; + dest_cr+= s->uvlinesize; + } + + if(field_select){ + ptr_y += s->linesize; + ptr_cb += s->uvlinesize; + ptr_cr += s->uvlinesize; + } + //damn interlaced mode + //FIXME boundary mirroring is not exactly correct here + qpix_op[1][dxy](dest_y , ptr_y , linesize); + qpix_op[1][dxy](dest_y+8, ptr_y+8, linesize); + } + if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ + pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1); + pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1); + } +} + +/** + * h263 chroma 4mv motion compensation. + */ +static inline void chroma_4mv_motion(MpegEncContext *s, + uint8_t *dest_cb, uint8_t *dest_cr, + uint8_t **ref_picture, + op_pixels_func *pix_op, + int mx, int my){ + int dxy, emu=0, src_x, src_y, offset; + uint8_t *ptr; + + /* In case of 8X8, we construct a single chroma motion vector + with a special rounding */ + mx= ff_h263_round_chroma(mx); + my= ff_h263_round_chroma(my); + + dxy = ((my & 1) << 1) | (mx & 1); + mx >>= 1; + my >>= 1; + + src_x = s->mb_x * 8 + mx; + src_y = s->mb_y * 8 + my; + src_x = av_clip(src_x, -8, s->width/2); + if (src_x == s->width/2) + dxy &= ~1; + src_y = av_clip(src_y, -8, s->height/2); + if (src_y == s->height/2) + dxy &= ~2; + + offset = (src_y * (s->uvlinesize)) + src_x; + ptr = ref_picture[1] + offset; + if(s->flags&CODEC_FLAG_EMU_EDGE){ + if( (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8 + || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){ + ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, + 9, 9, src_x, src_y, + s->h_edge_pos>>1, s->v_edge_pos>>1); + ptr= s->edge_emu_buffer; + emu=1; + } + } + pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8); + + ptr = ref_picture[2] + offset; + if(emu){ + ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, + 9, 9, src_x, src_y, + s->h_edge_pos>>1, s->v_edge_pos>>1); + ptr= s->edge_emu_buffer; + } + pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8); +} + +static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir){ + /* fetch pixels for estimated mv 4 macroblocks ahead + * optimized for 64byte cache lines */ + const int shift = s->quarter_sample ? 
2 : 1; + const int mx= (s->mv[dir][0][0]>>shift) + 16*s->mb_x + 8; + const int my= (s->mv[dir][0][1]>>shift) + 16*s->mb_y; + int off= mx + (my + (s->mb_x&3)*4)*s->linesize + 64; + s->dsp.prefetch(pix[0]+off, s->linesize, 4); + off= (mx>>1) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + 64; + s->dsp.prefetch(pix[1]+off, pix[2]-pix[1], 2); +} + +/** + * motion compensation of a single macroblock + * @param s context + * @param dest_y luma destination pointer + * @param dest_cb chroma cb/u destination pointer + * @param dest_cr chroma cr/v destination pointer + * @param dir direction (0->forward, 1->backward) + * @param ref_picture array[3] of pointers to the 3 planes of the reference picture + * @param pic_op halfpel motion compensation function (average or put normally) + * @param pic_op qpel motion compensation function (average or put normally) + * the motion vectors are taken from s->mv and the MV type from s->mv_type + */ +static av_always_inline void MPV_motion_internal(MpegEncContext *s, + uint8_t *dest_y, uint8_t *dest_cb, + uint8_t *dest_cr, int dir, + uint8_t **ref_picture, + op_pixels_func (*pix_op)[4], + qpel_mc_func (*qpix_op)[16], int is_mpeg12) +{ + int dxy, mx, my, src_x, src_y, motion_x, motion_y; + int mb_x, mb_y, i; + uint8_t *ptr, *dest; + + mb_x = s->mb_x; + mb_y = s->mb_y; + + prefetch_motion(s, ref_picture, dir); + + if(!is_mpeg12 && s->obmc && s->pict_type != FF_B_TYPE){ + int16_t mv_cache[4][4][2]; + const int xy= s->mb_x + s->mb_y*s->mb_stride; + const int mot_stride= s->b8_stride; + const int mot_xy= mb_x*2 + mb_y*2*mot_stride; + + assert(!s->mb_skipped); + + memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy ], sizeof(int16_t)*4); + memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4); + memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4); + + if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){ + memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4); + }else{ + memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4); + } + + if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){ + *(int32_t*)mv_cache[1][0]= *(int32_t*)mv_cache[1][1]; + *(int32_t*)mv_cache[2][0]= *(int32_t*)mv_cache[2][1]; + }else{ + *(int32_t*)mv_cache[1][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1]; + *(int32_t*)mv_cache[2][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1+mot_stride]; + } + + if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){ + *(int32_t*)mv_cache[1][3]= *(int32_t*)mv_cache[1][2]; + *(int32_t*)mv_cache[2][3]= *(int32_t*)mv_cache[2][2]; + }else{ + *(int32_t*)mv_cache[1][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2]; + *(int32_t*)mv_cache[2][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2+mot_stride]; + } + + mx = 0; + my = 0; + for(i=0;i<4;i++) { + const int x= (i&1)+1; + const int y= (i>>1)+1; + int16_t mv[5][2]= { + {mv_cache[y][x ][0], mv_cache[y][x ][1]}, + {mv_cache[y-1][x][0], mv_cache[y-1][x][1]}, + {mv_cache[y][x-1][0], mv_cache[y][x-1][1]}, + {mv_cache[y][x+1][0], mv_cache[y][x+1][1]}, + {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}}; + //FIXME cleanup + obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize, + ref_picture[0], + mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8, + pix_op[1], + mv); + + mx += mv[0][0]; + my += mv[0][1]; + } + if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)) + chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, 
pix_op[1], mx, my); + + return; + } + + switch(s->mv_type) { + case MV_TYPE_16X16: + if(s->mcsel){ + if(s->real_sprite_warping_points==1){ + gmc1_motion(s, dest_y, dest_cb, dest_cr, + ref_picture); + }else{ + gmc_motion(s, dest_y, dest_cb, dest_cr, + ref_picture); + } + }else if(!is_mpeg12 && s->quarter_sample){ + qpel_motion(s, dest_y, dest_cb, dest_cr, + 0, 0, 0, + ref_picture, pix_op, qpix_op, + s->mv[dir][0][0], s->mv[dir][0][1], 16); + }else if(!is_mpeg12 && ENABLE_WMV2 && s->mspel){ + ff_mspel_motion(s, dest_y, dest_cb, dest_cr, + ref_picture, pix_op, + s->mv[dir][0][0], s->mv[dir][0][1], 16); + }else + { + mpeg_motion(s, dest_y, dest_cb, dest_cr, + 0, 0, 0, + ref_picture, pix_op, + s->mv[dir][0][0], s->mv[dir][0][1], 16); + } + break; + case MV_TYPE_8X8: + if (!is_mpeg12) { + mx = 0; + my = 0; + if(s->quarter_sample){ + for(i=0;i<4;i++) { + motion_x = s->mv[dir][i][0]; + motion_y = s->mv[dir][i][1]; + + dxy = ((motion_y & 3) << 2) | (motion_x & 3); + src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8; + src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8; + + /* WARNING: do no forget half pels */ + src_x = av_clip(src_x, -16, s->width); + if (src_x == s->width) + dxy &= ~3; + src_y = av_clip(src_y, -16, s->height); + if (src_y == s->height) + dxy &= ~12; + + ptr = ref_picture[0] + (src_y * s->linesize) + (src_x); + if(s->flags&CODEC_FLAG_EMU_EDGE){ + if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 8 + || (unsigned)src_y > s->v_edge_pos - (motion_y&3) - 8 ){ + ff_emulated_edge_mc(s->edge_emu_buffer, ptr, + s->linesize, 9, 9, + src_x, src_y, + s->h_edge_pos, s->v_edge_pos); + ptr= s->edge_emu_buffer; + } + } + dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize; + qpix_op[1][dxy](dest, ptr, s->linesize); + + mx += s->mv[dir][i][0]/2; + my += s->mv[dir][i][1]/2; + } + }else{ + for(i=0;i<4;i++) { + hpel_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize, + ref_picture[0], 0, 0, + mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8, + s->width, s->height, s->linesize, + s->h_edge_pos, s->v_edge_pos, + 8, 8, pix_op[1], + s->mv[dir][i][0], s->mv[dir][i][1]); + + mx += s->mv[dir][i][0]; + my += s->mv[dir][i][1]; + } + } + + if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)) + chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my); + } + break; + case MV_TYPE_FIELD: + if (s->picture_structure == PICT_FRAME) { + if(!is_mpeg12 && s->quarter_sample){ + for(i=0; i<2; i++){ + qpel_motion(s, dest_y, dest_cb, dest_cr, + 1, i, s->field_select[dir][i], + ref_picture, pix_op, qpix_op, + s->mv[dir][i][0], s->mv[dir][i][1], 8); + } + }else{ + /* top field */ + mpeg_motion(s, dest_y, dest_cb, dest_cr, + 1, 0, s->field_select[dir][0], + ref_picture, pix_op, + s->mv[dir][0][0], s->mv[dir][0][1], 8); + /* bottom field */ + mpeg_motion(s, dest_y, dest_cb, dest_cr, + 1, 1, s->field_select[dir][1], + ref_picture, pix_op, + s->mv[dir][1][0], s->mv[dir][1][1], 8); + } + } else { + if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){ + ref_picture= s->current_picture_ptr->data; + } + + mpeg_motion(s, dest_y, dest_cb, dest_cr, + 0, 0, s->field_select[dir][0], + ref_picture, pix_op, + s->mv[dir][0][0], s->mv[dir][0][1], 16); + } + break; + case MV_TYPE_16X8: + for(i=0; i<2; i++){ + uint8_t ** ref2picture; + + if(s->picture_structure == s->field_select[dir][i] + 1 + || s->pict_type == FF_B_TYPE || s->first_field){ + ref2picture= ref_picture; + }else{ + ref2picture= s->current_picture_ptr->data; + } + + mpeg_motion(s, dest_y, dest_cb, 
dest_cr, + 0, 0, s->field_select[dir][i], + ref2picture, pix_op, + s->mv[dir][i][0], s->mv[dir][i][1] + 16*i, 8); + + dest_y += 16*s->linesize; + dest_cb+= (16>>s->chroma_y_shift)*s->uvlinesize; + dest_cr+= (16>>s->chroma_y_shift)*s->uvlinesize; + } + break; + case MV_TYPE_DMV: + if(s->picture_structure == PICT_FRAME){ + for(i=0; i<2; i++){ + int j; + for(j=0; j<2; j++){ + mpeg_motion(s, dest_y, dest_cb, dest_cr, + 1, j, j^i, + ref_picture, pix_op, + s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], 8); + } + pix_op = s->dsp.avg_pixels_tab; + } + }else{ + for(i=0; i<2; i++){ + mpeg_motion(s, dest_y, dest_cb, dest_cr, + 0, 0, s->picture_structure != i+1, + ref_picture, pix_op, + s->mv[dir][2*i][0],s->mv[dir][2*i][1],16); + + // after put we make avg of the same block + pix_op=s->dsp.avg_pixels_tab; + + //opposite parity is always in the same frame if this is second field + if(!s->first_field){ + ref_picture = s->current_picture_ptr->data; + } + } + } + break; + default: assert(0); + } +} + +static inline void MPV_motion(MpegEncContext *s, + uint8_t *dest_y, uint8_t *dest_cb, + uint8_t *dest_cr, int dir, + uint8_t **ref_picture, + op_pixels_func (*pix_op)[4], + qpel_mc_func (*qpix_op)[16]) +{ +#ifndef CONFIG_SMALL + if(s->out_format == FMT_MPEG1) + MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir, + ref_picture, pix_op, qpix_op, 1); + else +#endif + MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir, + ref_picture, pix_op, qpix_op, 0); +} +#endif /* FFMPEG_MPEGVIDEO_COMMON_H */ diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo_enc.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo_enc.c new file mode 100644 index 0000000000..f7600beb3c --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo_enc.c @@ -0,0 +1,3863 @@ +/* + * The simplest mpeg encoder (well, it was the simplest!) + * Copyright (c) 2000,2001 Fabrice Bellard. + * Copyright (c) 2002-2004 Michael Niedermayer + * + * 4MV & hq & B-frame encoding stuff by Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file mpegvideo_enc.c + * The simplest mpeg encoder (well, it was the simplest!). + */ + +#include "avcodec.h" +#include "dsputil.h" +#include "mpegvideo.h" +#include "mpegvideo_common.h" +#include "mjpegenc.h" +#include "msmpeg4.h" +#include "h263.h" +#include "faandct.h" +#include + +//#undef NDEBUG +//#include + +static int encode_picture(MpegEncContext *s, int picture_number); +static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale); +static int sse_mb(MpegEncContext *s); + +/* enable all paranoid tests for rounding, overflows, etc... 
*/ +//#define PARANOID + +//#define DEBUG + +static const uint16_t aanscales[64] = { + /* precomputed values scaled up by 14 bits */ + 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, + 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270, + 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906, + 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315, + 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, + 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552, + 8867 , 12299, 11585, 10426, 8867, 6967, 4799, 2446, + 4520 , 6270, 5906, 5315, 4520, 3552, 2446, 1247 +}; + +static const uint16_t inv_aanscales[64] = { + 4096, 2953, 3135, 3483, 4096, 5213, 7568, 14846, + 2953, 2129, 2260, 2511, 2953, 3759, 5457, 10703, + 3135, 2260, 2399, 2666, 3135, 3990, 5793, 11363, + 3483, 2511, 2666, 2962, 3483, 4433, 6436, 12625, + 4096, 2953, 3135, 3483, 4096, 5213, 7568, 14846, + 5213, 3759, 3990, 4433, 5213, 6635, 9633, 18895, + 7568, 5457, 5793, 6436, 7568, 9633, 13985, 27432, + 14846, 10703, 11363, 12625, 14846, 18895, 27432, 53809, +}; + +static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_MV*2+1]; +static uint8_t default_fcode_tab[MAX_MV*2+1]; + +void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64], + const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra) +{ + int qscale; + int shift=0; + + for(qscale=qmin; qscale<=qmax; qscale++){ + int i; + if (dsp->fdct == ff_jpeg_fdct_islow +#ifdef FAAN_POSTSCALE + || dsp->fdct == ff_faandct +#endif + ) { + for(i=0;i<64;i++) { + const int j= dsp->idct_permutation[i]; + /* 16 <= qscale * quant_matrix[i] <= 7905 */ + /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */ + /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */ + /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */ + + qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / + (qscale * quant_matrix[j])); + } + } else if (dsp->fdct == fdct_ifast +#ifndef FAAN_POSTSCALE + || dsp->fdct == ff_faandct +#endif + ) { + for(i=0;i<64;i++) { + const int j= dsp->idct_permutation[i]; + /* 16 <= qscale * quant_matrix[i] <= 7905 */ + /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */ + /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */ + /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */ + + qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / + (aanscales[i] * qscale * quant_matrix[j])); + } + } else { + for(i=0;i<64;i++) { + const int j= dsp->idct_permutation[i]; + /* We can safely suppose that 16 <= quant_matrix[i] <= 255 + So 16 <= qscale * quant_matrix[i] <= 7905 + so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905 + so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67 + */ + qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j])); +// qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]); + qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]); + + if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1; + qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]); + } + } + + for(i=intra; i<64; i++){ + int64_t max= 8191; + if (dsp->fdct == fdct_ifast +#ifndef FAAN_POSTSCALE + || dsp->fdct == ff_faandct +#endif + ) { + max= (8191LL*aanscales[i]) >> 14; + } + while(((max * qmat[qscale][i]) >> shift) > INT_MAX){ + shift++; + } + } + } + if(shift){ + 
av_log(NULL, AV_LOG_INFO, "Warning, QMAT_SHIFT is larger than %d, overflows possible\n", QMAT_SHIFT - shift); + } +} + +static inline void update_qscale(MpegEncContext *s){ + s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7); + s->qscale= av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax); + + s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT; +} + +void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix){ + int i; + + if(matrix){ + put_bits(pb, 1, 1); + for(i=0;i<64;i++) { + put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]); + } + }else + put_bits(pb, 1, 0); +} + +static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src){ + int i; + + dst->pict_type = src->pict_type; + dst->quality = src->quality; + dst->coded_picture_number = src->coded_picture_number; + dst->display_picture_number = src->display_picture_number; +// dst->reference = src->reference; + dst->pts = src->pts; + dst->interlaced_frame = src->interlaced_frame; + dst->top_field_first = src->top_field_first; + + if(s->avctx->me_threshold){ + if(!src->motion_val[0]) + av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n"); + if(!src->mb_type) + av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n"); + if(!src->ref_index[0]) + av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n"); + if(src->motion_subsample_log2 != dst->motion_subsample_log2) + av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n", + src->motion_subsample_log2, dst->motion_subsample_log2); + + memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0])); + + for(i=0; i<2; i++){ + int stride= ((16*s->mb_width )>>src->motion_subsample_log2) + 1; + int height= ((16*s->mb_height)>>src->motion_subsample_log2); + + if(src->motion_val[i] && src->motion_val[i] != dst->motion_val[i]){ + memcpy(dst->motion_val[i], src->motion_val[i], 2*stride*height*sizeof(int16_t)); + } + if(src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]){ + memcpy(dst->ref_index[i], src->ref_index[i], s->b8_stride*2*s->mb_height*sizeof(int8_t)); + } + } + } +} + +static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src){ +#define COPY(a) dst->a= src->a + COPY(pict_type); + COPY(current_picture); + COPY(f_code); + COPY(b_code); + COPY(qscale); + COPY(lambda); + COPY(lambda2); + COPY(picture_in_gop_number); + COPY(gop_picture_number); + COPY(frame_pred_frame_dct); //FIXME don't set in encode_header + COPY(progressive_frame); //FIXME don't set in encode_header + COPY(partitioned_frame); //FIXME don't set in encode_header +#undef COPY +} + +/** + * sets the given MpegEncContext to defaults for encoding. + * the changed fields will not depend upon the prior state of the MpegEncContext. 
+ */ +static void MPV_encode_defaults(MpegEncContext *s){ + int i; + MPV_common_defaults(s); + + for(i=-16; i<16; i++){ + default_fcode_tab[i + MAX_MV]= 1; + } + s->me.mv_penalty= default_mv_penalty; + s->fcode_tab= default_fcode_tab; +} + +/* init video encoder */ +av_cold int MPV_encode_init(AVCodecContext *avctx) +{ + MpegEncContext *s = avctx->priv_data; + int i; + int chroma_h_shift, chroma_v_shift; + + MPV_encode_defaults(s); + + switch (avctx->codec_id) { + case CODEC_ID_MPEG2VIDEO: + if(avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P){ + av_log(avctx, AV_LOG_ERROR, "only YUV420 and YUV422 are supported\n"); + return -1; + } + break; + case CODEC_ID_LJPEG: + case CODEC_ID_MJPEG: + if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUVJ422P && + ((avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P) || avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL)){ + av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n"); + return -1; + } + break; + default: + if(avctx->pix_fmt != PIX_FMT_YUV420P){ + av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n"); + return -1; + } + } + + switch (avctx->pix_fmt) { + case PIX_FMT_YUVJ422P: + case PIX_FMT_YUV422P: + s->chroma_format = CHROMA_422; + break; + case PIX_FMT_YUVJ420P: + case PIX_FMT_YUV420P: + default: + s->chroma_format = CHROMA_420; + break; + } + + s->bit_rate = avctx->bit_rate; + s->width = avctx->width; + s->height = avctx->height; + if(avctx->gop_size > 600 && avctx->strict_std_compliance>FF_COMPLIANCE_EXPERIMENTAL){ + av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...\n"); + avctx->gop_size=600; + } + s->gop_size = avctx->gop_size; + s->avctx = avctx; + s->flags= avctx->flags; + s->flags2= avctx->flags2; + s->max_b_frames= avctx->max_b_frames; + s->codec_id= avctx->codec->id; + s->luma_elim_threshold = avctx->luma_elim_threshold; + s->chroma_elim_threshold= avctx->chroma_elim_threshold; + s->strict_std_compliance= avctx->strict_std_compliance; + s->data_partitioning= avctx->flags & CODEC_FLAG_PART; + s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0; + s->mpeg_quant= avctx->mpeg_quant; + s->rtp_mode= !!avctx->rtp_payload_size; + s->intra_dc_precision= avctx->intra_dc_precision; + s->user_specified_pts = AV_NOPTS_VALUE; + + if (s->gop_size <= 1) { + s->intra_only = 1; + s->gop_size = 12; + } else { + s->intra_only = 0; + } + + s->me_method = avctx->me_method; + + /* Fixed QSCALE */ + s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE); + + s->adaptive_quant= ( s->avctx->lumi_masking + || s->avctx->dark_masking + || s->avctx->temporal_cplx_masking + || s->avctx->spatial_cplx_masking + || s->avctx->p_masking + || s->avctx->border_masking + || (s->flags&CODEC_FLAG_QP_RD)) + && !s->fixed_qscale; + + s->obmc= !!(s->flags & CODEC_FLAG_OBMC); + s->loop_filter= !!(s->flags & CODEC_FLAG_LOOP_FILTER); + s->alternate_scan= !!(s->flags & CODEC_FLAG_ALT_SCAN); + s->intra_vlc_format= !!(s->flags2 & CODEC_FLAG2_INTRA_VLC); + s->q_scale_type= !!(s->flags2 & CODEC_FLAG2_NON_LINEAR_QUANT); + +#if LIBAVCODEC_VERSION_INT < ((52<<16)+(0<<8)+0) + if (s->flags & CODEC_FLAG_TRELLIS_QUANT) + avctx->trellis = 1; +#endif + + if(avctx->rc_max_rate && !avctx->rc_buffer_size){ + av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n"); + return -1; + } + + if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){ + av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isn't 
recommended!\n"); + } + + if(avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate){ + av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n"); + return -1; + } + + if(avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate){ + av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n"); + return -1; + } + + if(avctx->rc_max_rate && avctx->rc_max_rate == avctx->bit_rate && avctx->rc_max_rate != avctx->rc_min_rate){ + av_log(avctx, AV_LOG_INFO, "impossible bitrate constraints, this will fail\n"); + } + + if(avctx->rc_buffer_size && avctx->bit_rate*av_q2d(avctx->time_base) > avctx->rc_buffer_size){ + av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n"); + return -1; + } + + if(avctx->bit_rate*av_q2d(avctx->time_base) > avctx->bit_rate_tolerance){ + av_log(avctx, AV_LOG_ERROR, "bitrate tolerance too small for bitrate\n"); + return -1; + } + + if( s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate + && (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) + && 90000LL * (avctx->rc_buffer_size-1) > s->avctx->rc_max_rate*0xFFFFLL){ + + av_log(avctx, AV_LOG_INFO, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the specified vbv buffer is too large for the given bitrate!\n"); + } + + if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4 + && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P && s->codec_id != CODEC_ID_FLV1){ + av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n"); + return -1; + } + + if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){ + av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decision\n"); + return -1; + } + + if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){ + av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n"); + return -1; + } + + if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){ + av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n"); + return -1; + } + + if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){ + av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n"); + return -1; + } + + if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){ + av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n"); + return -1; + } + + if((s->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN)) + && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO){ + av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n"); + return -1; + } + + if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too + av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supported by codec\n"); + return -1; + } + + if((s->flags & CODEC_FLAG_CBP_RD) && !avctx->trellis){ + av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n"); + return -1; + } + + if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){ + av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n"); + return -1; + } + + if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){ + av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection are not supported yet, set threshold to 1000000000\n"); + return -1; + } + + if((s->flags2 & CODEC_FLAG2_INTRA_VLC) && s->codec_id != CODEC_ID_MPEG2VIDEO){ + av_log(avctx, AV_LOG_ERROR, "intra vlc table not supported by codec\n"); + return -1; + } + + if(s->flags & CODEC_FLAG_LOW_DELAY){ + if 
(s->codec_id != CODEC_ID_MPEG2VIDEO){ + av_log(avctx, AV_LOG_ERROR, "low delay forcing is only available for mpeg2\n"); + return -1; + } + if (s->max_b_frames != 0){ + av_log(avctx, AV_LOG_ERROR, "b frames cannot be used with low delay\n"); + return -1; + } + } + + if(s->q_scale_type == 1){ + if(s->codec_id != CODEC_ID_MPEG2VIDEO){ + av_log(avctx, AV_LOG_ERROR, "non linear quant is only available for mpeg2\n"); + return -1; + } + if(avctx->qmax > 12){ + av_log(avctx, AV_LOG_ERROR, "non linear quant only supports qmax <= 12 currently\n"); + return -1; + } + } + + if(s->avctx->thread_count > 1 && s->codec_id != CODEC_ID_MPEG4 + && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO + && (s->codec_id != CODEC_ID_H263P || !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT))){ + av_log(avctx, AV_LOG_ERROR, "multi threaded encoding not supported by codec\n"); + return -1; + } + + if(s->avctx->thread_count > 1) + s->rtp_mode= 1; + + if(!avctx->time_base.den || !avctx->time_base.num){ + av_log(avctx, AV_LOG_ERROR, "framerate not set\n"); + return -1; + } + + i= (INT_MAX/2+128)>>8; + if(avctx->me_threshold >= i){ + av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n", i - 1); + return -1; + } + if(avctx->mb_threshold >= i){ + av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n", i - 1); + return -1; + } + + if(avctx->b_frame_strategy && (avctx->flags&CODEC_FLAG_PASS2)){ + av_log(avctx, AV_LOG_INFO, "notice: b_frame_strategy only affects the first pass\n"); + avctx->b_frame_strategy = 0; + } + + i= ff_gcd(avctx->time_base.den, avctx->time_base.num); + if(i > 1){ + av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n"); + avctx->time_base.den /= i; + avctx->time_base.num /= i; +// return -1; + } + + if(s->codec_id==CODEC_ID_MJPEG){ + s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x + s->inter_quant_bias= 0; + }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){ + s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x + s->inter_quant_bias= 0; + }else{ + s->intra_quant_bias=0; + s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x + } + + if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS) + s->intra_quant_bias= avctx->intra_quant_bias; + if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS) + s->inter_quant_bias= avctx->inter_quant_bias; + + avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift); + + if(avctx->codec_id == CODEC_ID_MPEG4 && s->avctx->time_base.den > (1<<16)-1){ + av_log(avctx, AV_LOG_ERROR, "timebase not supported by mpeg 4 standard\n"); + return -1; + } + s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1; + + switch(avctx->codec->id) { + case CODEC_ID_MPEG1VIDEO: + s->out_format = FMT_MPEG1; + s->low_delay= !!(s->flags & CODEC_FLAG_LOW_DELAY); + avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1); + break; + case CODEC_ID_MPEG2VIDEO: + s->out_format = FMT_MPEG1; + s->low_delay= !!(s->flags & CODEC_FLAG_LOW_DELAY); + avctx->delay= s->low_delay ? 
0 : (s->max_b_frames + 1); + s->rtp_mode= 1; + break; + case CODEC_ID_LJPEG: + case CODEC_ID_MJPEG: + s->out_format = FMT_MJPEG; + s->intra_only = 1; /* force intra only for jpeg */ + s->mjpeg_vsample[0] = 2; + s->mjpeg_vsample[1] = 2>>chroma_v_shift; + s->mjpeg_vsample[2] = 2>>chroma_v_shift; + s->mjpeg_hsample[0] = 2; + s->mjpeg_hsample[1] = 2>>chroma_h_shift; + s->mjpeg_hsample[2] = 2>>chroma_h_shift; + if (!(ENABLE_MJPEG_ENCODER || ENABLE_LJPEG_ENCODER) + || ff_mjpeg_encode_init(s) < 0) + return -1; + avctx->delay=0; + s->low_delay=1; + break; + case CODEC_ID_H261: + if (!ENABLE_H261_ENCODER) return -1; + if (ff_h261_get_picture_format(s->width, s->height) < 0) { + av_log(avctx, AV_LOG_ERROR, "The specified picture size of %dx%d is not valid for the H.261 codec.\nValid sizes are 176x144, 352x288\n", s->width, s->height); + return -1; + } + s->out_format = FMT_H261; + avctx->delay=0; + s->low_delay=1; + break; + case CODEC_ID_H263: + if (!ENABLE_H263_ENCODER) return -1; + if (h263_get_picture_format(s->width, s->height) == 7) { + av_log(avctx, AV_LOG_INFO, "The specified picture size of %dx%d is not valid for the H.263 codec.\nValid sizes are 128x96, 176x144, 352x288, 704x576, and 1408x1152. Try H.263+.\n", s->width, s->height); + return -1; + } + s->out_format = FMT_H263; + s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0; + avctx->delay=0; + s->low_delay=1; + break; + case CODEC_ID_H263P: + s->out_format = FMT_H263; + s->h263_plus = 1; + /* Fx */ + s->umvplus = (avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0; + s->h263_aic= (avctx->flags & CODEC_FLAG_AC_PRED) ? 1:0; + s->modified_quant= s->h263_aic; + s->alt_inter_vlc= (avctx->flags & CODEC_FLAG_H263P_AIV) ? 1:0; + s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0; + s->loop_filter= (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1:0; + s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus; + s->h263_slice_structured= (s->flags & CODEC_FLAG_H263P_SLICE_STRUCT) ? 1:0; + + /* /Fx */ + /* These are just to be sure */ + avctx->delay=0; + s->low_delay=1; + break; + case CODEC_ID_FLV1: + s->out_format = FMT_H263; + s->h263_flv = 2; /* format = 1; 11-bit codes */ + s->unrestricted_mv = 1; + s->rtp_mode=0; /* don't allow GOB */ + avctx->delay=0; + s->low_delay=1; + break; + case CODEC_ID_RV10: + s->out_format = FMT_H263; + avctx->delay=0; + s->low_delay=1; + break; + case CODEC_ID_RV20: + s->out_format = FMT_H263; + avctx->delay=0; + s->low_delay=1; + s->modified_quant=1; + s->h263_aic=1; + s->h263_plus=1; + s->loop_filter=1; + s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus; + break; + case CODEC_ID_MPEG4: + s->out_format = FMT_H263; + s->h263_pred = 1; + s->unrestricted_mv = 1; + s->low_delay= s->max_b_frames ? 0 : 1; + avctx->delay= s->low_delay ? 
0 : (s->max_b_frames + 1); + break; + case CODEC_ID_MSMPEG4V1: + s->out_format = FMT_H263; + s->h263_msmpeg4 = 1; + s->h263_pred = 1; + s->unrestricted_mv = 1; + s->msmpeg4_version= 1; + avctx->delay=0; + s->low_delay=1; + break; + case CODEC_ID_MSMPEG4V2: + s->out_format = FMT_H263; + s->h263_msmpeg4 = 1; + s->h263_pred = 1; + s->unrestricted_mv = 1; + s->msmpeg4_version= 2; + avctx->delay=0; + s->low_delay=1; + break; + case CODEC_ID_MSMPEG4V3: + s->out_format = FMT_H263; + s->h263_msmpeg4 = 1; + s->h263_pred = 1; + s->unrestricted_mv = 1; + s->msmpeg4_version= 3; + s->flipflop_rounding=1; + avctx->delay=0; + s->low_delay=1; + break; + case CODEC_ID_WMV1: + s->out_format = FMT_H263; + s->h263_msmpeg4 = 1; + s->h263_pred = 1; + s->unrestricted_mv = 1; + s->msmpeg4_version= 4; + s->flipflop_rounding=1; + avctx->delay=0; + s->low_delay=1; + break; + case CODEC_ID_WMV2: + s->out_format = FMT_H263; + s->h263_msmpeg4 = 1; + s->h263_pred = 1; + s->unrestricted_mv = 1; + s->msmpeg4_version= 5; + s->flipflop_rounding=1; + avctx->delay=0; + s->low_delay=1; + break; + default: + return -1; + } + + avctx->has_b_frames= !s->low_delay; + + s->encoding = 1; + + /* init */ + if (MPV_common_init(s) < 0) + return -1; + + if(!s->dct_quantize) + s->dct_quantize = dct_quantize_c; + if(!s->denoise_dct) + s->denoise_dct = denoise_dct_c; + s->fast_dct_quantize = s->dct_quantize; + if(avctx->trellis) + s->dct_quantize = dct_quantize_trellis_c; + + if((ENABLE_H263P_ENCODER || ENABLE_RV20_ENCODER) && s->modified_quant) + s->chroma_qscale_table= ff_h263_chroma_qscale_table; + s->progressive_frame= + s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN)); + s->quant_precision=5; + + ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp); + ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp); + + if (ENABLE_H261_ENCODER && s->out_format == FMT_H261) + ff_h261_encode_init(s); + if (ENABLE_ANY_H263_ENCODER && s->out_format == FMT_H263) + h263_encode_init(s); + if (ENABLE_MSMPEG4_ENCODER && s->msmpeg4_version) + ff_msmpeg4_encode_init(s); + if ((ENABLE_MPEG1VIDEO_ENCODER || ENABLE_MPEG2VIDEO_ENCODER) + && s->out_format == FMT_MPEG1) + ff_mpeg1_encode_init(s); + + /* init q matrix */ + for(i=0;i<64;i++) { + int j= s->dsp.idct_permutation[i]; + if(ENABLE_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){ + s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i]; + s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i]; + }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){ + s->intra_matrix[j] = + s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i]; + }else + { /* mpeg1/2 */ + s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i]; + s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i]; + } + if(s->avctx->intra_matrix) + s->intra_matrix[j] = s->avctx->intra_matrix[i]; + if(s->avctx->inter_matrix) + s->inter_matrix[j] = s->avctx->inter_matrix[i]; + } + + /* precompute matrix */ + /* for mjpeg, we do include qscale in the matrix */ + if (s->out_format != FMT_MJPEG) { + ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16, + s->intra_matrix, s->intra_quant_bias, avctx->qmin, 31, 1); + ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16, + s->inter_matrix, s->inter_quant_bias, avctx->qmin, 31, 0); + } + + if(ff_rate_control_init(s) < 0) + return -1; + + return 0; +} + +av_cold int MPV_encode_end(AVCodecContext *avctx) +{ + MpegEncContext *s = avctx->priv_data; + + 
ff_rate_control_uninit(s);
+
+    MPV_common_end(s);
+    if ((ENABLE_MJPEG_ENCODER || ENABLE_LJPEG_ENCODER) && s->out_format == FMT_MJPEG)
+        ff_mjpeg_encode_close(s);
+
+    av_freep(&avctx->extradata);
+
+    return 0;
+}
+
+static int get_sae(uint8_t *src, int ref, int stride){
+    int x,y;
+    int acc=0;
+
+    for(y=0; y<16; y++){
+        for(x=0; x<16; x++){
+            acc+= FFABS(src[x+y*stride] - ref);
+        }
+    }
+
+    return acc;
+}
+
+static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
+    int x, y, w, h;
+    int acc=0;
+
+    w= s->width &~15;
+    h= s->height&~15;
+
+    for(y=0; y<h; y+=16){
+        for(x=0; x<w; x+=16){
+            int offset= x + y*stride;
+            int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride, 16);
+            int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
+            int sae = get_sae(src + offset, mean, stride);
+
+            acc+= sae + 500 < sad;
+        }
+    }
+    return acc;
+}
+
+
+static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
+    AVFrame *pic=NULL;
+    int64_t pts;
+    int i;
+    const int encoding_delay= s->max_b_frames;
+    int direct=1;
+
+    if(pic_arg){
+        pts= pic_arg->pts;
+        pic_arg->display_picture_number= s->input_picture_number++;
+
+        if(pts != AV_NOPTS_VALUE){
+            if(s->user_specified_pts != AV_NOPTS_VALUE){
+                int64_t time= pts;
+                int64_t last= s->user_specified_pts;
+
+                if(time <= last){
+                    av_log(s->avctx, AV_LOG_ERROR, "Error, Invalid timestamp=%"PRId64", last=%"PRId64"\n", pts, s->user_specified_pts);
+                    return -1;
+                }
+            }
+            s->user_specified_pts= pts;
+        }else{
+            if(s->user_specified_pts != AV_NOPTS_VALUE){
+                s->user_specified_pts=
+                pts= s->user_specified_pts + 1;
+                av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n", pts);
+            }else{
+                pts= pic_arg->display_picture_number;
+            }
+        }
+    }
+
+    if(pic_arg){
+        if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
+        if(pic_arg->linesize[0] != s->linesize) direct=0;
+        if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
+        if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
+
+//        av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
+
+        if(direct){
+            i= ff_find_unused_picture(s, 1);
+
+            pic= (AVFrame*)&s->picture[i];
+            pic->reference= 3;
+
+            for(i=0; i<4; i++){
+                pic->data[i]= pic_arg->data[i];
+                pic->linesize[i]= pic_arg->linesize[i];
+            }
+            alloc_picture(s, (Picture*)pic, 1);
+        }else{
+            i= ff_find_unused_picture(s, 0);
+
+            pic= (AVFrame*)&s->picture[i];
+            pic->reference= 3;
+
+            alloc_picture(s, (Picture*)pic, 0);
+
+            if(   pic->data[0] + INPLACE_OFFSET == pic_arg->data[0]
+               && pic->data[1] + INPLACE_OFFSET == pic_arg->data[1]
+               && pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]){
+                // empty
+            }else{
+                int h_chroma_shift, v_chroma_shift;
+                avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
+
+                for(i=0; i<3; i++){
+                    int src_stride= pic_arg->linesize[i];
+                    int dst_stride= i ? s->uvlinesize : s->linesize;
+                    int h_shift= i ? h_chroma_shift : 0;
+                    int v_shift= i ?
v_chroma_shift : 0; + int w= s->width >>h_shift; + int h= s->height>>v_shift; + uint8_t *src= pic_arg->data[i]; + uint8_t *dst= pic->data[i]; + + if(!s->avctx->rc_buffer_size) + dst +=INPLACE_OFFSET; + + if(src_stride==dst_stride) + memcpy(dst, src, src_stride*h); + else{ + while(h--){ + memcpy(dst, src, w); + dst += dst_stride; + src += src_stride; + } + } + } + } + } + copy_picture_attributes(s, pic, pic_arg); + pic->pts= pts; //we set this here to avoid modifiying pic_arg + } + + /* shift buffer entries */ + for(i=1; iencoding_delay+1*/; i++) + s->input_picture[i-1]= s->input_picture[i]; + + s->input_picture[encoding_delay]= (Picture*)pic; + + return 0; +} + +static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){ + int x, y, plane; + int score=0; + int64_t score64=0; + + for(plane=0; plane<3; plane++){ + const int stride= p->linesize[plane]; + const int bw= plane ? 1 : 2; + for(y=0; ymb_height*bw; y++){ + for(x=0; xmb_width*bw; x++){ + int off= p->type == FF_BUFFER_TYPE_SHARED ? 0: 16; + int v= s->dsp.frame_skip_cmp[1](s, p->data[plane] + 8*(x + y*stride)+off, ref->data[plane] + 8*(x + y*stride), stride, 8); + + switch(s->avctx->frame_skip_exp){ + case 0: score= FFMAX(score, v); break; + case 1: score+= FFABS(v);break; + case 2: score+= v*v;break; + case 3: score64+= FFABS(v*v*(int64_t)v);break; + case 4: score64+= v*v*(int64_t)(v*v);break; + } + } + } + } + + if(score) score64= score; + + if(score64 < s->avctx->frame_skip_threshold) + return 1; + if(score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda)>>8)) + return 1; + return 0; +} + +static int estimate_best_b_count(MpegEncContext *s){ + AVCodec *codec= avcodec_find_encoder(s->avctx->codec_id); + AVCodecContext *c= avcodec_alloc_context(); + AVFrame input[FF_MAX_B_FRAMES+2]; + const int scale= s->avctx->brd_scale; + int i, j, out_size, p_lambda, b_lambda, lambda2; + int outbuf_size= s->width * s->height; //FIXME + uint8_t *outbuf= av_malloc(outbuf_size); + int64_t best_rd= INT64_MAX; + int best_b_count= -1; + + assert(scale>=0 && scale <=3); + +// emms_c(); + p_lambda= s->last_lambda_for[FF_P_TYPE]; //s->next_picture_ptr->quality; + b_lambda= s->last_lambda_for[FF_B_TYPE]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset; + if(!b_lambda) b_lambda= p_lambda; //FIXME we should do this somewhere else + lambda2= (b_lambda*b_lambda + (1<> FF_LAMBDA_SHIFT; + + c->width = s->width >> scale; + c->height= s->height>> scale; + c->flags= CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR | CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/; + c->flags|= s->avctx->flags & CODEC_FLAG_QPEL; + c->mb_decision= s->avctx->mb_decision; + c->me_cmp= s->avctx->me_cmp; + c->mb_cmp= s->avctx->mb_cmp; + c->me_sub_cmp= s->avctx->me_sub_cmp; + c->pix_fmt = PIX_FMT_YUV420P; + c->time_base= s->avctx->time_base; + c->max_b_frames= s->max_b_frames; + + if (avcodec_open(c, codec) < 0) + return -1; + + for(i=0; imax_b_frames+2; i++){ + int ysize= c->width*c->height; + int csize= (c->width/2)*(c->height/2); + Picture pre_input, *pre_input_ptr= i ? 
s->input_picture[i-1] : s->next_picture_ptr; + + avcodec_get_frame_defaults(&input[i]); + input[i].data[0]= av_malloc(ysize + 2*csize); + input[i].data[1]= input[i].data[0] + ysize; + input[i].data[2]= input[i].data[1] + csize; + input[i].linesize[0]= c->width; + input[i].linesize[1]= + input[i].linesize[2]= c->width/2; + + if(pre_input_ptr && (!i || s->input_picture[i-1])) { + pre_input= *pre_input_ptr; + + if(pre_input.type != FF_BUFFER_TYPE_SHARED && i) { + pre_input.data[0]+=INPLACE_OFFSET; + pre_input.data[1]+=INPLACE_OFFSET; + pre_input.data[2]+=INPLACE_OFFSET; + } + + s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], pre_input.data[0], pre_input.linesize[0], c->width, c->height); + s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], pre_input.data[1], pre_input.linesize[1], c->width>>1, c->height>>1); + s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], pre_input.data[2], pre_input.linesize[2], c->width>>1, c->height>>1); + } + } + + for(j=0; jmax_b_frames+1; j++){ + int64_t rd=0; + + if(!s->input_picture[j]) + break; + + c->error[0]= c->error[1]= c->error[2]= 0; + + input[0].pict_type= FF_I_TYPE; + input[0].quality= 1 * FF_QP2LAMBDA; + out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[0]); +// rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT; + + for(i=0; imax_b_frames+1; i++){ + int is_p= i % (j+1) == j || i==s->max_b_frames; + + input[i+1].pict_type= is_p ? FF_P_TYPE : FF_B_TYPE; + input[i+1].quality= is_p ? p_lambda : b_lambda; + out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[i+1]); + rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3); + } + + /* get the delayed frames */ + while(out_size){ + out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL); + rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3); + } + + rd += c->error[0] + c->error[1] + c->error[2]; + + if(rd < best_rd){ + best_rd= rd; + best_b_count= j; + } + } + + av_freep(&outbuf); + avcodec_close(c); + av_freep(&c); + + for(i=0; imax_b_frames+2; i++){ + av_freep(&input[i].data[0]); + } + + return best_b_count; +} + +static void select_input_picture(MpegEncContext *s){ + int i; + + for(i=1; ireordered_input_picture[i-1]= s->reordered_input_picture[i]; + s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL; + + /* set next picture type & ordering */ + if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){ + if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){ + s->reordered_input_picture[0]= s->input_picture[0]; + s->reordered_input_picture[0]->pict_type= FF_I_TYPE; + s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; + }else{ + int b_frames; + + if(s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor){ + if(s->picture_in_gop_number < s->gop_size && skip_check(s, s->input_picture[0], s->next_picture_ptr)){ + //FIXME check that te gop check above is +-1 correct +//av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n", s->input_picture[0]->data[0], s->input_picture[0]->pts); + + if(s->input_picture[0]->type == FF_BUFFER_TYPE_SHARED){ + for(i=0; i<4; i++) + s->input_picture[0]->data[i]= NULL; + s->input_picture[0]->type= 0; + }else{ + assert( s->input_picture[0]->type==FF_BUFFER_TYPE_USER + || s->input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL); + + s->avctx->release_buffer(s->avctx, (AVFrame*)s->input_picture[0]); + } + + emms_c(); + ff_vbv_update(s, 0); + + goto no_output_pic; + } + } + + if(s->flags&CODEC_FLAG_PASS2){ + for(i=0; imax_b_frames+1; i++){ + int pict_num= 
s->input_picture[0]->display_picture_number + i; + + if(pict_num >= s->rc_context.num_entries) + break; + if(!s->input_picture[i]){ + s->rc_context.entry[pict_num-1].new_pict_type = FF_P_TYPE; + break; + } + + s->input_picture[i]->pict_type= + s->rc_context.entry[pict_num].new_pict_type; + } + } + + if(s->avctx->b_frame_strategy==0){ + b_frames= s->max_b_frames; + while(b_frames && !s->input_picture[b_frames]) b_frames--; + }else if(s->avctx->b_frame_strategy==1){ + for(i=1; imax_b_frames+1; i++){ + if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){ + s->input_picture[i]->b_frame_score= + get_intra_count(s, s->input_picture[i ]->data[0], + s->input_picture[i-1]->data[0], s->linesize) + 1; + } + } + for(i=0; imax_b_frames+1; i++){ + if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/s->avctx->b_sensitivity) break; + } + + b_frames= FFMAX(0, i-1); + + /* reset scores */ + for(i=0; iinput_picture[i]->b_frame_score=0; + } + }else if(s->avctx->b_frame_strategy==2){ + b_frames= estimate_best_b_count(s); + }else{ + av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n"); + b_frames=0; + } + + emms_c(); +//static int b_count=0; +//b_count+= b_frames; +//av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count); + + for(i= b_frames - 1; i>=0; i--){ + int type= s->input_picture[i]->pict_type; + if(type && type != FF_B_TYPE) + b_frames= i; + } + if(s->input_picture[b_frames]->pict_type == FF_B_TYPE && b_frames == s->max_b_frames){ + av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n"); + } + + if(s->picture_in_gop_number + b_frames >= s->gop_size){ + if((s->flags2 & CODEC_FLAG2_STRICT_GOP) && s->gop_size > s->picture_in_gop_number){ + b_frames= s->gop_size - s->picture_in_gop_number - 1; + }else{ + if(s->flags & CODEC_FLAG_CLOSED_GOP) + b_frames=0; + s->input_picture[b_frames]->pict_type= FF_I_TYPE; + } + } + + if( (s->flags & CODEC_FLAG_CLOSED_GOP) + && b_frames + && s->input_picture[b_frames]->pict_type== FF_I_TYPE) + b_frames--; + + s->reordered_input_picture[0]= s->input_picture[b_frames]; + if(s->reordered_input_picture[0]->pict_type != FF_I_TYPE) + s->reordered_input_picture[0]->pict_type= FF_P_TYPE; + s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; + for(i=0; ireordered_input_picture[i+1]= s->input_picture[i]; + s->reordered_input_picture[i+1]->pict_type= FF_B_TYPE; + s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++; + } + } + } +no_output_pic: + if(s->reordered_input_picture[0]){ + s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=FF_B_TYPE ? 
3 : 0; + + copy_picture(&s->new_picture, s->reordered_input_picture[0]); + + if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED || s->avctx->rc_buffer_size){ + // input is a shared pix, so we can't modifiy it -> alloc a new one & ensure that the shared one is reuseable + + int i= ff_find_unused_picture(s, 0); + Picture *pic= &s->picture[i]; + + pic->reference = s->reordered_input_picture[0]->reference; + alloc_picture(s, pic, 0); + + /* mark us unused / free shared pic */ + if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_INTERNAL) + s->avctx->release_buffer(s->avctx, (AVFrame*)s->reordered_input_picture[0]); + for(i=0; i<4; i++) + s->reordered_input_picture[0]->data[i]= NULL; + s->reordered_input_picture[0]->type= 0; + + copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]); + + s->current_picture_ptr= pic; + }else{ + // input is not a shared pix -> reuse buffer for current_pix + + assert( s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER + || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL); + + s->current_picture_ptr= s->reordered_input_picture[0]; + for(i=0; i<4; i++){ + s->new_picture.data[i]+= INPLACE_OFFSET; + } + } + copy_picture(&s->current_picture, s->current_picture_ptr); + + s->picture_number= s->new_picture.display_picture_number; +//printf("dpn:%d\n", s->picture_number); + }else{ + memset(&s->new_picture, 0, sizeof(Picture)); + } +} + +int MPV_encode_picture(AVCodecContext *avctx, + unsigned char *buf, int buf_size, void *data) +{ + MpegEncContext *s = avctx->priv_data; + AVFrame *pic_arg = data; + int i, stuffing_count; + + for(i=0; ithread_count; i++){ + int start_y= s->thread_context[i]->start_mb_y; + int end_y= s->thread_context[i]-> end_mb_y; + int h= s->mb_height; + uint8_t *start= buf + (size_t)(((int64_t) buf_size)*start_y/h); + uint8_t *end = buf + (size_t)(((int64_t) buf_size)* end_y/h); + + init_put_bits(&s->thread_context[i]->pb, start, end - start); + } + + s->picture_in_gop_number++; + + if(load_input_picture(s, pic_arg) < 0) + return -1; + + select_input_picture(s); + + /* output? 
*/ + if(s->new_picture.data[0]){ + s->pict_type= s->new_picture.pict_type; +//emms_c(); +//printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale); + MPV_frame_start(s, avctx); +vbv_retry: + if (encode_picture(s, s->picture_number) < 0) + return -1; + + avctx->real_pict_num = s->picture_number; + avctx->header_bits = s->header_bits; + avctx->mv_bits = s->mv_bits; + avctx->misc_bits = s->misc_bits; + avctx->i_tex_bits = s->i_tex_bits; + avctx->p_tex_bits = s->p_tex_bits; + avctx->i_count = s->i_count; + avctx->p_count = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx + avctx->skip_count = s->skip_count; + + MPV_frame_end(s); + + if (ENABLE_MJPEG_ENCODER && s->out_format == FMT_MJPEG) + ff_mjpeg_encode_picture_trailer(s); + + if(avctx->rc_buffer_size){ + RateControlContext *rcc= &s->rc_context; + int max_size= rcc->buffer_index/3; + + if(put_bits_count(&s->pb) > max_size && s->lambda < s->avctx->lmax){ + s->next_lambda= FFMAX(s->lambda+1, s->lambda*(s->qscale+1) / s->qscale); + if(s->adaptive_quant){ + int i; + for(i=0; imb_height*s->mb_stride; i++) + s->lambda_table[i]= FFMAX(s->lambda_table[i]+1, s->lambda_table[i]*(s->qscale+1) / s->qscale); + } + s->mb_skipped = 0; //done in MPV_frame_start() + if(s->pict_type==FF_P_TYPE){ //done in encode_picture() so we must undo it + if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4) + s->no_rounding ^= 1; + } + if(s->pict_type!=FF_B_TYPE){ + s->time_base= s->last_time_base; + s->last_non_b_time= s->time - s->pp_time; + } +// av_log(NULL, AV_LOG_ERROR, "R:%d ", s->next_lambda); + for(i=0; ithread_count; i++){ + PutBitContext *pb= &s->thread_context[i]->pb; + init_put_bits(pb, pb->buf, pb->buf_end - pb->buf); + } + goto vbv_retry; + } + + assert(s->avctx->rc_max_rate); + } + + if(s->flags&CODEC_FLAG_PASS1) + ff_write_pass1_stats(s); + + for(i=0; i<4; i++){ + s->current_picture_ptr->error[i]= s->current_picture.error[i]; + avctx->error[i] += s->current_picture_ptr->error[i]; + } + + if(s->flags&CODEC_FLAG_PASS1) + assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits + avctx->i_tex_bits + avctx->p_tex_bits == put_bits_count(&s->pb)); + flush_put_bits(&s->pb); + s->frame_bits = put_bits_count(&s->pb); + + stuffing_count= ff_vbv_update(s, s->frame_bits); + if(stuffing_count){ + if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < stuffing_count + 50){ + av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n"); + return -1; + } + + switch(s->codec_id){ + case CODEC_ID_MPEG1VIDEO: + case CODEC_ID_MPEG2VIDEO: + while(stuffing_count--){ + put_bits(&s->pb, 8, 0); + } + break; + case CODEC_ID_MPEG4: + put_bits(&s->pb, 16, 0); + put_bits(&s->pb, 16, 0x1C3); + stuffing_count -= 4; + while(stuffing_count--){ + put_bits(&s->pb, 8, 0xFF); + } + break; + default: + av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n"); + } + flush_put_bits(&s->pb); + s->frame_bits = put_bits_count(&s->pb); + } + + /* update mpeg1/2 vbv_delay for CBR */ + if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && s->out_format == FMT_MPEG1 + && 90000LL * (avctx->rc_buffer_size-1) <= s->avctx->rc_max_rate*0xFFFFLL){ + int vbv_delay; + + assert(s->repeat_first_field==0); + + vbv_delay= lrintf(90000 * s->rc_context.buffer_index / s->avctx->rc_max_rate); + assert(vbv_delay < 0xFFFF); + + s->vbv_delay_ptr[0] &= 0xF8; + s->vbv_delay_ptr[0] |= vbv_delay>>13; + s->vbv_delay_ptr[1] = vbv_delay>>5; + s->vbv_delay_ptr[2] &= 0x07; + s->vbv_delay_ptr[2] |= vbv_delay<<3; + } 
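+        /* Illustrative note (assumed numbers, not from the original source): vbv_delay
+         * is the 16-bit MPEG-1/2 picture-header field counted in 90 kHz ticks. With a
+         * hypothetical CBR setup of rc_max_rate = 1500000 bit/s and
+         * rc_context.buffer_index = 183750 bits, the formula above gives
+         * vbv_delay = 90000*183750/1500000 = 11025 ticks, i.e. about 122.5 ms.
+         * The three writes through vbv_delay_ptr scatter those 16 bits over header
+         * bytes that were already emitted: the top 3 bits into the low bits of byte 0,
+         * the middle 8 bits into byte 1 and the low 5 bits into the top of byte 2. */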
+ s->total_bits += s->frame_bits; + avctx->frame_bits = s->frame_bits; + }else{ + assert((pbBufPtr(&s->pb) == s->pb.buf)); + s->frame_bits=0; + } + assert((s->frame_bits&7)==0); + + return s->frame_bits/8; +} + +static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold) +{ + static const char tab[64]= + {3,2,2,1,1,1,1,1, + 1,1,1,1,1,1,1,1, + 1,1,1,1,1,1,1,1, + 0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0}; + int score=0; + int run=0; + int i; + DCTELEM *block= s->block[n]; + const int last_index= s->block_last_index[n]; + int skip_dc; + + if(threshold<0){ + skip_dc=0; + threshold= -threshold; + }else + skip_dc=1; + + /* Are all we could set to zero already zero? */ + if(last_index<=skip_dc - 1) return; + + for(i=0; i<=last_index; i++){ + const int j = s->intra_scantable.permutated[i]; + const int level = FFABS(block[j]); + if(level==1){ + if(skip_dc && i==0) continue; + score+= tab[run]; + run=0; + }else if(level>1){ + return; + }else{ + run++; + } + } + if(score >= threshold) return; + for(i=skip_dc; i<=last_index; i++){ + const int j = s->intra_scantable.permutated[i]; + block[j]=0; + } + if(block[0]) s->block_last_index[n]= 0; + else s->block_last_index[n]= -1; +} + +static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index) +{ + int i; + const int maxlevel= s->max_qcoeff; + const int minlevel= s->min_qcoeff; + int overflow=0; + + if(s->mb_intra){ + i=1; //skip clipping of intra dc + }else + i=0; + + for(;i<=last_index; i++){ + const int j= s->intra_scantable.permutated[i]; + int level = block[j]; + + if (level>maxlevel){ + level=maxlevel; + overflow++; + }else if(level<minlevel){ + level=minlevel; + overflow++; + } + + block[j]= level; + } + + if(overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE) + av_log(s->avctx, AV_LOG_INFO, "warning, clipping %d dct coefficients to %d..%d\n", overflow, minlevel, maxlevel); +} + +static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride){ + int x, y; +//FIXME optimize + for(y=0; y<8; y++){ + for(x=0; x<8; x++){ + int x2, y2; + int sum=0; + int sqr=0; + int count=0; + + for(y2= FFMAX(y-1, 0); y2 < FFMIN(8, y+2); y2++){ + for(x2= FFMAX(x-1, 0); x2 < FFMIN(8, x+2); x2++){ + int v= ptr[x2 + y2*stride]; + sum += v; + sqr += v*v; + count++; + } + } + weight[x + 8*y]= (36*ff_sqrt(count*sqr - sum*sum)) / count; + } + } +} + +static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_count) +{ + int16_t weight[8][64]; + DCTELEM orig[8][64]; + const int mb_x= s->mb_x; + const int mb_y= s->mb_y; + int i; + int skip_dct[8]; + int dct_offset = s->linesize*8; //default for progressive frames + uint8_t *ptr_y, *ptr_cb, *ptr_cr; + int wrap_y, wrap_c; + + for(i=0; i<mb_block_count; i++) skip_dct[i]= s->skipdct; + + if(s->adaptive_quant){ + const int last_qp= s->qscale; + const int mb_xy= mb_x + mb_y*s->mb_stride; + + s->lambda= s->lambda_table[mb_xy]; + update_qscale(s); + + if(!(s->flags&CODEC_FLAG_QP_RD)){ + s->qscale= s->current_picture_ptr->qscale_table[mb_xy]; + s->dquant= s->qscale - last_qp; + + if(s->out_format==FMT_H263){ + s->dquant= av_clip(s->dquant, -2, 2); + + if(s->codec_id==CODEC_ID_MPEG4){ + if(!s->mb_intra){ + if(s->pict_type == FF_B_TYPE){ + if(s->dquant&1 || s->mv_dir&MV_DIRECT) + s->dquant= 0; + } + if(s->mv_type==MV_TYPE_8X8) + s->dquant=0; + } + } + } + } + ff_set_qscale(s, last_qp + s->dquant); + }else if(s->flags&CODEC_FLAG_QP_RD) + ff_set_qscale(s, s->qscale + s->dquant); + + wrap_y = s->linesize; + wrap_c = s->uvlinesize; + ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) +
mb_x * 16; + ptr_cb = s->new_picture.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * 8; + ptr_cr = s->new_picture.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * 8; + + if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){ + uint8_t *ebuf= s->edge_emu_buffer + 32; + ff_emulated_edge_mc(ebuf , ptr_y , wrap_y,16,16,mb_x*16,mb_y*16, s->width , s->height); + ptr_y= ebuf; + ff_emulated_edge_mc(ebuf+18*wrap_y , ptr_cb, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1); + ptr_cb= ebuf+18*wrap_y; + ff_emulated_edge_mc(ebuf+18*wrap_y+8, ptr_cr, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1); + ptr_cr= ebuf+18*wrap_y+8; + } + + if (s->mb_intra) { + if(s->flags&CODEC_FLAG_INTERLACED_DCT){ + int progressive_score, interlaced_score; + + s->interlaced_dct=0; + progressive_score= s->dsp.ildct_cmp[4](s, ptr_y , NULL, wrap_y, 8) + +s->dsp.ildct_cmp[4](s, ptr_y + wrap_y*8, NULL, wrap_y, 8) - 400; + + if(progressive_score > 0){ + interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y , NULL, wrap_y*2, 8) + +s->dsp.ildct_cmp[4](s, ptr_y + wrap_y , NULL, wrap_y*2, 8); + if(progressive_score > interlaced_score){ + s->interlaced_dct=1; + + dct_offset= wrap_y; + wrap_y<<=1; + if (s->chroma_format == CHROMA_422) + wrap_c<<=1; + } + } + } + + s->dsp.get_pixels(s->block[0], ptr_y , wrap_y); + s->dsp.get_pixels(s->block[1], ptr_y + 8, wrap_y); + s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y); + s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y); + + if(s->flags&CODEC_FLAG_GRAY){ + skip_dct[4]= 1; + skip_dct[5]= 1; + }else{ + s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c); + s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c); + if(!s->chroma_y_shift){ /* 422 */ + s->dsp.get_pixels(s->block[6], ptr_cb + (dct_offset>>1), wrap_c); + s->dsp.get_pixels(s->block[7], ptr_cr + (dct_offset>>1), wrap_c); + } + } + }else{ + op_pixels_func (*op_pix)[4]; + qpel_mc_func (*op_qpix)[16]; + uint8_t *dest_y, *dest_cb, *dest_cr; + + dest_y = s->dest[0]; + dest_cb = s->dest[1]; + dest_cr = s->dest[2]; + + if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){ + op_pix = s->dsp.put_pixels_tab; + op_qpix= s->dsp.put_qpel_pixels_tab; + }else{ + op_pix = s->dsp.put_no_rnd_pixels_tab; + op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab; + } + + if (s->mv_dir & MV_DIR_FORWARD) { + MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix); + op_pix = s->dsp.avg_pixels_tab; + op_qpix= s->dsp.avg_qpel_pixels_tab; + } + if (s->mv_dir & MV_DIR_BACKWARD) { + MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix); + } + + if(s->flags&CODEC_FLAG_INTERLACED_DCT){ + int progressive_score, interlaced_score; + + s->interlaced_dct=0; + progressive_score= s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y, 8) + +s->dsp.ildct_cmp[0](s, dest_y + wrap_y*8, ptr_y + wrap_y*8, wrap_y, 8) - 400; + + if(s->avctx->ildct_cmp == FF_CMP_VSSE) progressive_score -= 400; + + if(progressive_score>0){ + interlaced_score = s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y*2, 8) + +s->dsp.ildct_cmp[0](s, dest_y + wrap_y , ptr_y + wrap_y , wrap_y*2, 8); + + if(progressive_score > interlaced_score){ + s->interlaced_dct=1; + + dct_offset= wrap_y; + wrap_y<<=1; + if (s->chroma_format == CHROMA_422) + wrap_c<<=1; + } + } + } + + s->dsp.diff_pixels(s->block[0], ptr_y , dest_y , wrap_y); + s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y); + s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y); + s->dsp.diff_pixels(s->block[3], ptr_y + 
dct_offset + 8, dest_y + dct_offset + 8, wrap_y); + + if(s->flags&CODEC_FLAG_GRAY){ + skip_dct[4]= 1; + skip_dct[5]= 1; + }else{ + s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c); + s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c); + if(!s->chroma_y_shift){ /* 422 */ + s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset>>1), dest_cb + (dct_offset>>1), wrap_c); + s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset>>1), dest_cr + (dct_offset>>1), wrap_c); + } + } + /* pre quantization */ + if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){ + //FIXME optimize + if(s->dsp.sad[1](NULL, ptr_y , dest_y , wrap_y, 8) < 20*s->qscale) skip_dct[0]= 1; + if(s->dsp.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20*s->qscale) skip_dct[1]= 1; + if(s->dsp.sad[1](NULL, ptr_y +dct_offset , dest_y +dct_offset , wrap_y, 8) < 20*s->qscale) skip_dct[2]= 1; + if(s->dsp.sad[1](NULL, ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y, 8) < 20*s->qscale) skip_dct[3]= 1; + if(s->dsp.sad[1](NULL, ptr_cb , dest_cb , wrap_c, 8) < 20*s->qscale) skip_dct[4]= 1; + if(s->dsp.sad[1](NULL, ptr_cr , dest_cr , wrap_c, 8) < 20*s->qscale) skip_dct[5]= 1; + if(!s->chroma_y_shift){ /* 422 */ + if(s->dsp.sad[1](NULL, ptr_cb +(dct_offset>>1), dest_cb +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[6]= 1; + if(s->dsp.sad[1](NULL, ptr_cr +(dct_offset>>1), dest_cr +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[7]= 1; + } + } + } + + if(s->avctx->quantizer_noise_shaping){ + if(!skip_dct[0]) get_visual_weight(weight[0], ptr_y , wrap_y); + if(!skip_dct[1]) get_visual_weight(weight[1], ptr_y + 8, wrap_y); + if(!skip_dct[2]) get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y); + if(!skip_dct[3]) get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y); + if(!skip_dct[4]) get_visual_weight(weight[4], ptr_cb , wrap_c); + if(!skip_dct[5]) get_visual_weight(weight[5], ptr_cr , wrap_c); + if(!s->chroma_y_shift){ /* 422 */ + if(!skip_dct[6]) get_visual_weight(weight[6], ptr_cb + (dct_offset>>1), wrap_c); + if(!skip_dct[7]) get_visual_weight(weight[7], ptr_cr + (dct_offset>>1), wrap_c); + } + memcpy(orig[0], s->block[0], sizeof(DCTELEM)*64*mb_block_count); + } + + /* DCT & quantize */ + assert(s->out_format!=FMT_MJPEG || s->qscale==8); + { + for(i=0;iblock_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow); + // FIXME we could decide to change to quantizer instead of clipping + // JS: I don't think that would be a good idea it could lower quality instead + // of improve it. 
Just INTRADC clipping deserves changes in quantizer + if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]); + }else + s->block_last_index[i]= -1; + } + if(s->avctx->quantizer_noise_shaping){ + for(i=0;iblock_last_index[i] = dct_quantize_refine(s, s->block[i], weight[i], orig[i], i, s->qscale); + } + } + } + + if(s->luma_elim_threshold && !s->mb_intra) + for(i=0; i<4; i++) + dct_single_coeff_elimination(s, i, s->luma_elim_threshold); + if(s->chroma_elim_threshold && !s->mb_intra) + for(i=4; ichroma_elim_threshold); + + if(s->flags & CODEC_FLAG_CBP_RD){ + for(i=0;iblock_last_index[i] == -1) + s->coded_score[i]= INT_MAX/256; + } + } + } + + if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){ + s->block_last_index[4]= + s->block_last_index[5]= 0; + s->block[4][0]= + s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale; + } + + //non c quantize code returns incorrect block_last_index FIXME + if(s->alternate_scan && s->dct_quantize != dct_quantize_c){ + for(i=0; iblock_last_index[i]>0){ + for(j=63; j>0; j--){ + if(s->block[i][ s->intra_scantable.permutated[j] ]) break; + } + s->block_last_index[i]= j; + } + } + } + + /* huffman encode */ + switch(s->codec_id){ //FIXME funct ptr could be slightly faster + case CODEC_ID_MPEG1VIDEO: + case CODEC_ID_MPEG2VIDEO: + if (ENABLE_MPEG1VIDEO_ENCODER || ENABLE_MPEG2VIDEO_ENCODER) + mpeg1_encode_mb(s, s->block, motion_x, motion_y); + break; + case CODEC_ID_MPEG4: + if (ENABLE_MPEG4_ENCODER) + mpeg4_encode_mb(s, s->block, motion_x, motion_y); + break; + case CODEC_ID_MSMPEG4V2: + case CODEC_ID_MSMPEG4V3: + case CODEC_ID_WMV1: + if (ENABLE_MSMPEG4_ENCODER) + msmpeg4_encode_mb(s, s->block, motion_x, motion_y); + break; + case CODEC_ID_WMV2: + if (ENABLE_WMV2_ENCODER) + ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); + break; + case CODEC_ID_H261: + if (ENABLE_H261_ENCODER) + ff_h261_encode_mb(s, s->block, motion_x, motion_y); + break; + case CODEC_ID_H263: + case CODEC_ID_H263P: + case CODEC_ID_FLV1: + case CODEC_ID_RV10: + case CODEC_ID_RV20: + if (ENABLE_H263_ENCODER || ENABLE_H263P_ENCODER || + ENABLE_FLV_ENCODER || ENABLE_RV10_ENCODER || ENABLE_RV20_ENCODER) + h263_encode_mb(s, s->block, motion_x, motion_y); + break; + case CODEC_ID_MJPEG: + if (ENABLE_MJPEG_ENCODER) + ff_mjpeg_encode_mb(s, s->block); + break; + default: + assert(0); + } +} + +static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y) +{ + if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6); + else encode_mb_internal(s, motion_x, motion_y, 16, 8); +} + +static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){ + int i; + + memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop? + + /* mpeg1 */ + d->mb_skip_run= s->mb_skip_run; + for(i=0; i<3; i++) + d->last_dc[i]= s->last_dc[i]; + + /* statistics */ + d->mv_bits= s->mv_bits; + d->i_tex_bits= s->i_tex_bits; + d->p_tex_bits= s->p_tex_bits; + d->i_count= s->i_count; + d->f_count= s->f_count; + d->b_count= s->b_count; + d->skip_count= s->skip_count; + d->misc_bits= s->misc_bits; + d->last_bits= 0; + + d->mb_skipped= 0; + d->qscale= s->qscale; + d->dquant= s->dquant; + + d->esc3_level_length= s->esc3_level_length; +} + +static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){ + int i; + + memcpy(d->mv, s->mv, 2*4*2*sizeof(int)); + memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop? 
+ + /* mpeg1 */ + d->mb_skip_run= s->mb_skip_run; + for(i=0; i<3; i++) + d->last_dc[i]= s->last_dc[i]; + + /* statistics */ + d->mv_bits= s->mv_bits; + d->i_tex_bits= s->i_tex_bits; + d->p_tex_bits= s->p_tex_bits; + d->i_count= s->i_count; + d->f_count= s->f_count; + d->b_count= s->b_count; + d->skip_count= s->skip_count; + d->misc_bits= s->misc_bits; + + d->mb_intra= s->mb_intra; + d->mb_skipped= s->mb_skipped; + d->mv_type= s->mv_type; + d->mv_dir= s->mv_dir; + d->pb= s->pb; + if(s->data_partitioning){ + d->pb2= s->pb2; + d->tex_pb= s->tex_pb; + } + d->block= s->block; + for(i=0; i<8; i++) + d->block_last_index[i]= s->block_last_index[i]; + d->interlaced_dct= s->interlaced_dct; + d->qscale= s->qscale; + + d->esc3_level_length= s->esc3_level_length; +} + +static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, + PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], + int *dmin, int *next_block, int motion_x, int motion_y) +{ + int score; + uint8_t *dest_backup[3]; + + copy_context_before_encode(s, backup, type); + + s->block= s->blocks[*next_block]; + s->pb= pb[*next_block]; + if(s->data_partitioning){ + s->pb2 = pb2 [*next_block]; + s->tex_pb= tex_pb[*next_block]; + } + + if(*next_block){ + memcpy(dest_backup, s->dest, sizeof(s->dest)); + s->dest[0] = s->rd_scratchpad; + s->dest[1] = s->rd_scratchpad + 16*s->linesize; + s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8; + assert(s->linesize >= 32); //FIXME + } + + encode_mb(s, motion_x, motion_y); + + score= put_bits_count(&s->pb); + if(s->data_partitioning){ + score+= put_bits_count(&s->pb2); + score+= put_bits_count(&s->tex_pb); + } + + if(s->avctx->mb_decision == FF_MB_DECISION_RD){ + MPV_decode_mb(s, s->block); + + score *= s->lambda2; + score += sse_mb(s) << FF_LAMBDA_SHIFT; + } + + if(*next_block){ + memcpy(s->dest, dest_backup, sizeof(s->dest)); + } + + if(score<*dmin){ + *dmin= score; + *next_block^=1; + + copy_context_after_encode(best, s, type); + } +} + +static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){ + uint32_t *sq = ff_squareTbl + 256; + int acc=0; + int x,y; + + if(w==16 && h==16) + return s->dsp.sse[0](NULL, src1, src2, stride, 16); + else if(w==8 && h==8) + return s->dsp.sse[1](NULL, src1, src2, stride, 8); + + for(y=0; y<h; y++){ + for(x=0; x<w; x++){ + acc+= sq[src1[x + y*stride] - src2[x + y*stride]]; + } + } + + assert(acc>=0); + + return acc; +} + +static int sse_mb(MpegEncContext *s){ + int w= 16; + int h= 16; + + if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16; + if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16; + + if(w==16 && h==16) + if(s->avctx->mb_cmp == FF_CMP_NSSE){ + return s->dsp.nsse[0](s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16) + +s->dsp.nsse[1](s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8) + +s->dsp.nsse[1](s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8); + }else{ + return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16) + +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8) + +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8); + } + else + return sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize) + +sse(s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize) + +sse(s,
s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize); +} + +static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){ + MpegEncContext *s= arg; + + + s->me.pre_pass=1; + s->me.dia_size= s->avctx->pre_dia_size; + s->first_slice_line=1; + for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) { + for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) { + ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y); + } + s->first_slice_line=0; + } + + s->me.pre_pass=0; + + return 0; +} + +static int estimate_motion_thread(AVCodecContext *c, void *arg){ + MpegEncContext *s= arg; + + ff_check_alignment(); + + s->me.dia_size= s->avctx->dia_size; + s->first_slice_line=1; + for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) { + s->mb_x=0; //for block init below + ff_init_block_index(s); + for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) { + s->block_index[0]+=2; + s->block_index[1]+=2; + s->block_index[2]+=2; + s->block_index[3]+=2; + + /* compute motion vector & mb_type and store in context */ + if(s->pict_type==FF_B_TYPE) + ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y); + else + ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y); + } + s->first_slice_line=0; + } + return 0; +} + +static int mb_var_thread(AVCodecContext *c, void *arg){ + MpegEncContext *s= arg; + int mb_x, mb_y; + + ff_check_alignment(); + + for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) { + for(mb_x=0; mb_x < s->mb_width; mb_x++) { + int xx = mb_x * 16; + int yy = mb_y * 16; + uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx; + int varc; + int sum = s->dsp.pix_sum(pix, s->linesize); + + varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8; + + s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc; + s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8; + s->me.mb_var_sum_temp += varc; + } + } + return 0; +} + +static void write_slice_end(MpegEncContext *s){ + if(ENABLE_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4){ + if(s->partitioned_frame){ + ff_mpeg4_merge_partitions(s); + } + + ff_mpeg4_stuffing(&s->pb); + }else if(ENABLE_MJPEG_ENCODER && s->out_format == FMT_MJPEG){ + ff_mjpeg_encode_stuffing(&s->pb); + } + + align_put_bits(&s->pb); + flush_put_bits(&s->pb); + + if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame) + s->misc_bits+= get_bits_diff(s); +} + +static int encode_thread(AVCodecContext *c, void *arg){ + MpegEncContext *s= arg; + int mb_x, mb_y, pdif = 0; + int chr_h= 16>>s->chroma_y_shift; + int i, j; + MpegEncContext best_s, backup_s; + uint8_t bit_buf[2][MAX_MB_BYTES]; + uint8_t bit_buf2[2][MAX_MB_BYTES]; + uint8_t bit_buf_tex[2][MAX_MB_BYTES]; + PutBitContext pb[2], pb2[2], tex_pb[2]; +//printf("%d->%d\n", s->resync_mb_y, s->end_mb_y); + + ff_check_alignment(); + + for(i=0; i<2; i++){ + init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES); + init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES); + init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES); + } + + s->last_bits= put_bits_count(&s->pb); + s->mv_bits=0; + s->misc_bits=0; + s->i_tex_bits=0; + s->p_tex_bits=0; + s->i_count=0; + s->f_count=0; + s->b_count=0; + s->skip_count=0; + + for(i=0; i<3; i++){ + /* init last dc values */ + /* note: quant matrix value (8) is implied here */ + s->last_dc[i] = 128 << s->intra_dc_precision; + + s->current_picture.error[i] = 0; + } + s->mb_skip_run = 0; + memset(s->last_mv, 0, sizeof(s->last_mv)); + + s->last_mv_dir = 0; + + switch(s->codec_id){ + case CODEC_ID_H263: + case 
CODEC_ID_H263P: + case CODEC_ID_FLV1: + if (ENABLE_H263_ENCODER || ENABLE_H263P_ENCODER || ENABLE_FLV_ENCODER) + s->gob_index = ff_h263_get_gob_height(s); + break; + case CODEC_ID_MPEG4: + if(ENABLE_MPEG4_ENCODER && s->partitioned_frame) + ff_mpeg4_init_partitions(s); + break; + } + + s->resync_mb_x=0; + s->resync_mb_y=0; + s->first_slice_line = 1; + s->ptr_lastgob = s->pb.buf; + for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) { +// printf("row %d at %X\n", s->mb_y, (int)s); + s->mb_x=0; + s->mb_y= mb_y; + + ff_set_qscale(s, s->qscale); + ff_init_block_index(s); + + for(mb_x=0; mb_x < s->mb_width; mb_x++) { + int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this + int mb_type= s->mb_type[xy]; +// int d; + int dmin= INT_MAX; + int dir; + + if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){ + av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); + return -1; + } + if(s->data_partitioning){ + if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES + || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){ + av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); + return -1; + } + } + + s->mb_x = mb_x; + s->mb_y = mb_y; // moved into loop, can get changed by H.261 + ff_update_block_index(s); + + if(ENABLE_H261_ENCODER && s->codec_id == CODEC_ID_H261){ + ff_h261_reorder_mb_index(s); + xy= s->mb_y*s->mb_stride + s->mb_x; + mb_type= s->mb_type[xy]; + } + + /* write gob / video packet header */ + if(s->rtp_mode){ + int current_packet_size, is_gob_start; + + current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf); + + is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0; + + if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1; + + switch(s->codec_id){ + case CODEC_ID_H263: + case CODEC_ID_H263P: + if(!s->h263_slice_structured) + if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0; + break; + case CODEC_ID_MPEG2VIDEO: + if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1; + case CODEC_ID_MPEG1VIDEO: + if(s->mb_skip_run) is_gob_start=0; + break; + } + + if(is_gob_start){ + if(s->start_mb_y != mb_y || mb_x!=0){ + write_slice_end(s); + + if(ENABLE_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){ + ff_mpeg4_init_partitions(s); + } + } + + assert((put_bits_count(&s->pb)&7) == 0); + current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob; + + if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){ + int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y; + int d= 100 / s->avctx->error_rate; + if(r % d == 0){ + current_packet_size=0; +#ifndef ALT_BITSTREAM_WRITER + s->pb.buf_ptr= s->ptr_lastgob; +#endif + assert(pbBufPtr(&s->pb) == s->ptr_lastgob); + } + } + + if (s->avctx->rtp_callback){ + int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x; + s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb); + } + + switch(s->codec_id){ + case CODEC_ID_MPEG4: + if (ENABLE_MPEG4_ENCODER) { + ff_mpeg4_encode_video_packet_header(s); + ff_mpeg4_clean_buffers(s); + } + break; + case CODEC_ID_MPEG1VIDEO: + case CODEC_ID_MPEG2VIDEO: + if (ENABLE_MPEG1VIDEO_ENCODER || ENABLE_MPEG2VIDEO_ENCODER) { + ff_mpeg1_encode_slice_header(s); + ff_mpeg1_clean_buffers(s); + } + break; + case CODEC_ID_H263: + case CODEC_ID_H263P: + if (ENABLE_H263_ENCODER || ENABLE_H263P_ENCODER) + h263_encode_gob_header(s, mb_y); + break; + } 
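+                    /* Sketch of the bookkeeping below (comment added for clarity, not
+                     * part of the original source): after a slice/GOB header is written,
+                     * misc_bits absorbs the header bits for two-pass statistics,
+                     * ptr_lastgob is advanced so the next rtp_payload_size comparison
+                     * only measures the new slice, and resync_mb_x/y record the
+                     * macroblock where prediction restarts. For example, with an
+                     * assumed rtp_payload_size of 500, is_gob_start fires once roughly
+                     * 500 bytes have accumulated since ptr_lastgob. */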
+ + if(s->flags&CODEC_FLAG_PASS1){ + int bits= put_bits_count(&s->pb); + s->misc_bits+= bits - s->last_bits; + s->last_bits= bits; + } + + s->ptr_lastgob += current_packet_size; + s->first_slice_line=1; + s->resync_mb_x=mb_x; + s->resync_mb_y=mb_y; + } + } + + if( (s->resync_mb_x == s->mb_x) + && s->resync_mb_y+1 == s->mb_y){ + s->first_slice_line=0; + } + + s->mb_skipped=0; + s->dquant=0; //only for QP_RD + + if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){ // more than 1 MB type possible or CODEC_FLAG_QP_RD + int next_block=0; + int pb_bits_count, pb2_bits_count, tex_pb_bits_count; + + copy_context_before_encode(&backup_s, s, -1); + backup_s.pb= s->pb; + best_s.data_partitioning= s->data_partitioning; + best_s.partitioned_frame= s->partitioned_frame; + if(s->data_partitioning){ + backup_s.pb2= s->pb2; + backup_s.tex_pb= s->tex_pb; + } + + if(mb_type&CANDIDATE_MB_TYPE_INTER){ + s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_16X16; + s->mb_intra= 0; + s->mv[0][0][0] = s->p_mv_table[xy][0]; + s->mv[0][0][1] = s->p_mv_table[xy][1]; + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb, + &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); + } + if(mb_type&CANDIDATE_MB_TYPE_INTER_I){ + s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(i=0; i<2; i++){ + j= s->field_select[0][i] = s->p_field_select_table[i][xy]; + s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0]; + s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1]; + } + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb, + &dmin, &next_block, 0, 0); + } + if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){ + s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_16X16; + s->mb_intra= 0; + s->mv[0][0][0] = 0; + s->mv[0][0][1] = 0; + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb, + &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); + } + if(mb_type&CANDIDATE_MB_TYPE_INTER4V){ + s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_8X8; + s->mb_intra= 0; + for(i=0; i<4; i++){ + s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0]; + s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1]; + } + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb, + &dmin, &next_block, 0, 0); + } + if(mb_type&CANDIDATE_MB_TYPE_FORWARD){ + s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_16X16; + s->mb_intra= 0; + s->mv[0][0][0] = s->b_forw_mv_table[xy][0]; + s->mv[0][0][1] = s->b_forw_mv_table[xy][1]; + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb, + &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); + } + if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){ + s->mv_dir = MV_DIR_BACKWARD; + s->mv_type = MV_TYPE_16X16; + s->mb_intra= 0; + s->mv[1][0][0] = s->b_back_mv_table[xy][0]; + s->mv[1][0][1] = s->b_back_mv_table[xy][1]; + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb, + &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]); + } + if(mb_type&CANDIDATE_MB_TYPE_BIDIR){ + s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; + s->mv_type = MV_TYPE_16X16; + s->mb_intra= 0; + s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0]; + s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1]; + s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0]; + s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1]; + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb, + &dmin, &next_block, 0, 0); + } + if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){ + 
s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(i=0; i<2; i++){ + j= s->field_select[0][i] = s->b_field_select_table[0][i][xy]; + s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0]; + s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1]; + } + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb, + &dmin, &next_block, 0, 0); + } + if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){ + s->mv_dir = MV_DIR_BACKWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(i=0; i<2; i++){ + j= s->field_select[1][i] = s->b_field_select_table[1][i][xy]; + s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0]; + s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1]; + } + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb, + &dmin, &next_block, 0, 0); + } + if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){ + s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(dir=0; dir<2; dir++){ + for(i=0; i<2; i++){ + j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy]; + s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0]; + s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1]; + } + } + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb, + &dmin, &next_block, 0, 0); + } + if(mb_type&CANDIDATE_MB_TYPE_INTRA){ + s->mv_dir = 0; + s->mv_type = MV_TYPE_16X16; + s->mb_intra= 1; + s->mv[0][0][0] = 0; + s->mv[0][0][1] = 0; + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb, + &dmin, &next_block, 0, 0); + if(s->h263_pred || s->h263_aic){ + if(best_s.mb_intra) + s->mbintra_table[mb_x + mb_y*s->mb_stride]=1; + else + ff_clean_intra_table_entries(s); //old mode? + } + } + + if((s->flags & CODEC_FLAG_QP_RD) && dmin < INT_MAX){ + if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD + const int last_qp= backup_s.qscale; + int qpi, qp, dc[6]; + DCTELEM ac[6][16]; + const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0; + static const int dquant_tab[4]={-1,1,-2,2}; + + assert(backup_s.dquant == 0); + + //FIXME intra + s->mv_dir= best_s.mv_dir; + s->mv_type = MV_TYPE_16X16; + s->mb_intra= best_s.mb_intra; + s->mv[0][0][0] = best_s.mv[0][0][0]; + s->mv[0][0][1] = best_s.mv[0][0][1]; + s->mv[1][0][0] = best_s.mv[1][0][0]; + s->mv[1][0][1] = best_s.mv[1][0][1]; + + qpi = s->pict_type == FF_B_TYPE ? 
2 : 0; + for(; qpi<4; qpi++){ + int dquant= dquant_tab[qpi]; + qp= last_qp + dquant; + if(qp < s->avctx->qmin || qp > s->avctx->qmax) + continue; + backup_s.dquant= dquant; + if(s->mb_intra && s->dc_val[0]){ + for(i=0; i<6; i++){ + dc[i]= s->dc_val[0][ s->block_index[i] ]; + memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16); + } + } + + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb, + &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]); + if(best_s.qscale != qp){ + if(s->mb_intra && s->dc_val[0]){ + for(i=0; i<6; i++){ + s->dc_val[0][ s->block_index[i] ]= dc[i]; + memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16); + } + } + } + } + } + } + if(ENABLE_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){ + int mx= s->b_direct_mv_table[xy][0]; + int my= s->b_direct_mv_table[xy][1]; + + backup_s.dquant = 0; + s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; + s->mb_intra= 0; + ff_mpeg4_set_direct_mv(s, mx, my); + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb, + &dmin, &next_block, mx, my); + } + if(ENABLE_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){ + backup_s.dquant = 0; + s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; + s->mb_intra= 0; + ff_mpeg4_set_direct_mv(s, 0, 0); + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb, + &dmin, &next_block, 0, 0); + } + if(!best_s.mb_intra && s->flags2&CODEC_FLAG2_SKIP_RD){ + int coded=0; + for(i=0; i<6; i++) + coded |= s->block_last_index[i]; + if(coded){ + int mx,my; + memcpy(s->mv, best_s.mv, sizeof(s->mv)); + if(ENABLE_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){ + mx=my=0; //FIXME find the one we actually used + ff_mpeg4_set_direct_mv(s, mx, my); + }else if(best_s.mv_dir&MV_DIR_BACKWARD){ + mx= s->mv[1][0][0]; + my= s->mv[1][0][1]; + }else{ + mx= s->mv[0][0][0]; + my= s->mv[0][0][1]; + } + + s->mv_dir= best_s.mv_dir; + s->mv_type = best_s.mv_type; + s->mb_intra= 0; +/* s->mv[0][0][0] = best_s.mv[0][0][0]; + s->mv[0][0][1] = best_s.mv[0][0][1]; + s->mv[1][0][0] = best_s.mv[1][0][0]; + s->mv[1][0][1] = best_s.mv[1][0][1];*/ + backup_s.dquant= 0; + s->skipdct=1; + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb, + &dmin, &next_block, mx, my); + s->skipdct=0; + } + } + + s->current_picture.qscale_table[xy]= best_s.qscale; + + copy_context_after_encode(s, &best_s, -1); + + pb_bits_count= put_bits_count(&s->pb); + flush_put_bits(&s->pb); + ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count); + s->pb= backup_s.pb; + + if(s->data_partitioning){ + pb2_bits_count= put_bits_count(&s->pb2); + flush_put_bits(&s->pb2); + ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count); + s->pb2= backup_s.pb2; + + tex_pb_bits_count= put_bits_count(&s->tex_pb); + flush_put_bits(&s->tex_pb); + ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count); + s->tex_pb= backup_s.tex_pb; + } + s->last_bits= put_bits_count(&s->pb); + + if (ENABLE_ANY_H263_ENCODER && + s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE) + ff_h263_update_motion_val(s); + + if(next_block==0){ //FIXME 16 vs linesize16 + s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16); + s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8); + s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8); + } + + if(s->avctx->mb_decision == 
FF_MB_DECISION_BITS) + MPV_decode_mb(s, s->block); + } else { + int motion_x = 0, motion_y = 0; + s->mv_type=MV_TYPE_16X16; + // only one MB-Type possible + + switch(mb_type){ + case CANDIDATE_MB_TYPE_INTRA: + s->mv_dir = 0; + s->mb_intra= 1; + motion_x= s->mv[0][0][0] = 0; + motion_y= s->mv[0][0][1] = 0; + break; + case CANDIDATE_MB_TYPE_INTER: + s->mv_dir = MV_DIR_FORWARD; + s->mb_intra= 0; + motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0]; + motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1]; + break; + case CANDIDATE_MB_TYPE_INTER_I: + s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(i=0; i<2; i++){ + j= s->field_select[0][i] = s->p_field_select_table[i][xy]; + s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0]; + s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1]; + } + break; + case CANDIDATE_MB_TYPE_INTER4V: + s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_8X8; + s->mb_intra= 0; + for(i=0; i<4; i++){ + s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0]; + s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1]; + } + break; + case CANDIDATE_MB_TYPE_DIRECT: + if (ENABLE_MPEG4_ENCODER) { + s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT; + s->mb_intra= 0; + motion_x=s->b_direct_mv_table[xy][0]; + motion_y=s->b_direct_mv_table[xy][1]; + ff_mpeg4_set_direct_mv(s, motion_x, motion_y); + } + break; + case CANDIDATE_MB_TYPE_DIRECT0: + if (ENABLE_MPEG4_ENCODER) { + s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT; + s->mb_intra= 0; + ff_mpeg4_set_direct_mv(s, 0, 0); + } + break; + case CANDIDATE_MB_TYPE_BIDIR: + s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; + s->mb_intra= 0; + s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0]; + s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1]; + s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0]; + s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1]; + break; + case CANDIDATE_MB_TYPE_BACKWARD: + s->mv_dir = MV_DIR_BACKWARD; + s->mb_intra= 0; + motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0]; + motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1]; + break; + case CANDIDATE_MB_TYPE_FORWARD: + s->mv_dir = MV_DIR_FORWARD; + s->mb_intra= 0; + motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0]; + motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1]; +// printf(" %d %d ", motion_x, motion_y); + break; + case CANDIDATE_MB_TYPE_FORWARD_I: + s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(i=0; i<2; i++){ + j= s->field_select[0][i] = s->b_field_select_table[0][i][xy]; + s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0]; + s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1]; + } + break; + case CANDIDATE_MB_TYPE_BACKWARD_I: + s->mv_dir = MV_DIR_BACKWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(i=0; i<2; i++){ + j= s->field_select[1][i] = s->b_field_select_table[1][i][xy]; + s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0]; + s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1]; + } + break; + case CANDIDATE_MB_TYPE_BIDIR_I: + s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(dir=0; dir<2; dir++){ + for(i=0; i<2; i++){ + j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy]; + s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0]; + s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1]; + } + } + break; + default: + av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n"); + } + + encode_mb(s, motion_x, motion_y); + + // RAL: Update last macroblock type + s->last_mv_dir = 
s->mv_dir; + + if (ENABLE_ANY_H263_ENCODER && + s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE) + ff_h263_update_motion_val(s); + + MPV_decode_mb(s, s->block); + } + + /* clean the MV table in IPS frames for direct mode in B frames */ + if(s->mb_intra /* && I,P,S_TYPE */){ + s->p_mv_table[xy][0]=0; + s->p_mv_table[xy][1]=0; + } + + if(s->flags&CODEC_FLAG_PSNR){ + int w= 16; + int h= 16; + + if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16; + if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16; + + s->current_picture.error[0] += sse( + s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, + s->dest[0], w, h, s->linesize); + s->current_picture.error[1] += sse( + s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, + s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize); + s->current_picture.error[2] += sse( + s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, + s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize); + } + if(s->loop_filter){ + if(ENABLE_ANY_H263_ENCODER && s->out_format == FMT_H263) + ff_h263_loop_filter(s); + } +//printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, put_bits_count(&s->pb)); + } + } + + //not beautiful here but we must write it before flushing so it has to be here + if (ENABLE_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == FF_I_TYPE) + msmpeg4_encode_ext_header(s); + + write_slice_end(s); + + /* Send the last GOB if RTP */ + if (s->avctx->rtp_callback) { + int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x; + pdif = pbBufPtr(&s->pb) - s->ptr_lastgob; + /* Call the RTP callback to send the last GOB */ + emms_c(); + s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb); + } + + return 0; +} + +#define MERGE(field) dst->field += src->field; src->field=0 +static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){ + MERGE(me.scene_change_score); + MERGE(me.mc_mb_var_sum_temp); + MERGE(me.mb_var_sum_temp); +} + +static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){ + int i; + + MERGE(dct_count[0]); //note, the other dct vars are not part of the context + MERGE(dct_count[1]); + MERGE(mv_bits); + MERGE(i_tex_bits); + MERGE(p_tex_bits); + MERGE(i_count); + MERGE(f_count); + MERGE(b_count); + MERGE(skip_count); + MERGE(misc_bits); + MERGE(error_count); + MERGE(padding_bug_score); + MERGE(current_picture.error[0]); + MERGE(current_picture.error[1]); + MERGE(current_picture.error[2]); + + if(dst->avctx->noise_reduction){ + for(i=0; i<64; i++){ + MERGE(dct_error_sum[0][i]); + MERGE(dct_error_sum[1][i]); + } + } + + assert(put_bits_count(&src->pb) % 8 ==0); + assert(put_bits_count(&dst->pb) % 8 ==0); + ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb)); + flush_put_bits(&dst->pb); +} + +static int estimate_qp(MpegEncContext *s, int dry_run){ + if (s->next_lambda){ + s->current_picture_ptr->quality= + s->current_picture.quality = s->next_lambda; + if(!dry_run) s->next_lambda= 0; + } else if (!s->fixed_qscale) { + s->current_picture_ptr->quality= + s->current_picture.quality = ff_rate_estimate_qscale(s, dry_run); + if (s->current_picture.quality < 0) + return -1; + } + + if(s->adaptive_quant){ + switch(s->codec_id){ + case CODEC_ID_MPEG4: + if (ENABLE_MPEG4_ENCODER) + ff_clean_mpeg4_qscales(s); + break; + case CODEC_ID_H263: + case CODEC_ID_H263P: + case CODEC_ID_FLV1: + if (ENABLE_H263_ENCODER||ENABLE_H263P_ENCODER||ENABLE_FLV_ENCODER) + ff_clean_h263_qscales(s); + break; + } + + 
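+        /* Clarifying comment (my reading of the surrounding code, not from the
+         * original source): the "quality" value stored above is a lambda in
+         * FF_LAMBDA_SCALE units (roughly qscale*FF_QP2LAMBDA), not a quantizer.
+         * update_qscale(), called at the end of this function, derives s->qscale
+         * from s->lambda and clips it to avctx->qmin..qmax, so the per-MB
+         * lambda_table used below only needs to carry lambdas. */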
s->lambda= s->lambda_table[0]; + //FIXME broken + }else + s->lambda= s->current_picture.quality; +//printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality); + update_qscale(s); + return 0; +} + +/* must be called before writing the header */ +static void set_frame_distances(MpegEncContext * s){ + assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE); + s->time= s->current_picture_ptr->pts*s->avctx->time_base.num; + + if(s->pict_type==FF_B_TYPE){ + s->pb_time= s->pp_time - (s->last_non_b_time - s->time); + assert(s->pb_time > 0 && s->pb_time < s->pp_time); + }else{ + s->pp_time= s->time - s->last_non_b_time; + s->last_non_b_time= s->time; + assert(s->picture_number==0 || s->pp_time > 0); + } +} + +static int encode_picture(MpegEncContext *s, int picture_number) +{ + int i; + int bits; + + s->picture_number = picture_number; + + /* Reset the average MB variance */ + s->me.mb_var_sum_temp = + s->me.mc_mb_var_sum_temp = 0; + + /* we need to initialize some time vars before we can encode b-frames */ + // RAL: Condition added for MPEG1VIDEO + if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4)) + set_frame_distances(s); + if(ENABLE_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4) + ff_set_mpeg4_time(s); + + s->me.scene_change_score=0; + +// s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion + + if(s->pict_type==FF_I_TYPE){ + if(s->msmpeg4_version >= 3) s->no_rounding=1; + else s->no_rounding=0; + }else if(s->pict_type!=FF_B_TYPE){ + if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4) + s->no_rounding ^= 1; + } + + if(s->flags & CODEC_FLAG_PASS2){ + if (estimate_qp(s,1) < 0) + return -1; + ff_get_2pass_fcode(s); + }else if(!(s->flags & CODEC_FLAG_QSCALE)){ + if(s->pict_type==FF_B_TYPE) + s->lambda= s->last_lambda_for[s->pict_type]; + else + s->lambda= s->last_lambda_for[s->last_non_b_pict_type]; + update_qscale(s); + } + + s->mb_intra=0; //for the rate distortion & bit compare functions + for(i=1; iavctx->thread_count; i++){ + ff_update_duplicate_context(s->thread_context[i], s); + } + + ff_init_me(s); + + /* Estimate motion for every MB */ + if(s->pict_type != FF_I_TYPE){ + s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8; + s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8; + if(s->pict_type != FF_B_TYPE && s->avctx->me_threshold==0){ + if((s->avctx->pre_me && s->last_non_b_pict_type==FF_I_TYPE) || s->avctx->pre_me==2){ + s->avctx->execute(s->avctx, pre_estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count); + } + } + + s->avctx->execute(s->avctx, estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count); + }else /* if(s->pict_type == FF_I_TYPE) */{ + /* I-Frame */ + for(i=0; imb_stride*s->mb_height; i++) + s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA; + + if(!s->fixed_qscale){ + /* finding spatial complexity for I-frame rate control */ + s->avctx->execute(s->avctx, mb_var_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count); + } + } + for(i=1; iavctx->thread_count; i++){ + merge_context_after_me(s, s->thread_context[i]); + } + s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp; + s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. 
mb_var_sum_temp; + emms_c(); + + if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == FF_P_TYPE){ + s->pict_type= FF_I_TYPE; + for(i=0; imb_stride*s->mb_height; i++) + s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA; +//printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum); + } + + if(!s->umvplus){ + if(s->pict_type==FF_P_TYPE || s->pict_type==FF_S_TYPE) { + s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER); + + if(s->flags & CODEC_FLAG_INTERLACED_ME){ + int a,b; + a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select + b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I); + s->f_code= FFMAX3(s->f_code, a, b); + } + + ff_fix_long_p_mvs(s); + ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0); + if(s->flags & CODEC_FLAG_INTERLACED_ME){ + int j; + for(i=0; i<2; i++){ + for(j=0; j<2; j++) + ff_fix_long_mvs(s, s->p_field_select_table[i], j, + s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0); + } + } + } + + if(s->pict_type==FF_B_TYPE){ + int a, b; + + a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD); + b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR); + s->f_code = FFMAX(a, b); + + a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD); + b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR); + s->b_code = FFMAX(a, b); + + ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1); + ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1); + ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1); + ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1); + if(s->flags & CODEC_FLAG_INTERLACED_ME){ + int dir, j; + for(dir=0; dir<2; dir++){ + for(i=0; i<2; i++){ + for(j=0; j<2; j++){ + int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I) + : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I); + ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j, + s->b_field_mv_table[dir][i][j], dir ? 
s->b_code : s->f_code, type, 1); + } + } + } + } + } + } + + if (estimate_qp(s, 0) < 0) + return -1; + + if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==FF_I_TYPE && !(s->flags & CODEC_FLAG_QSCALE)) + s->qscale= 3; //reduce clipping problems + + if (s->out_format == FMT_MJPEG) { + /* for mjpeg, we do include qscale in the matrix */ + s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0]; + for(i=1;i<64;i++){ + int j= s->dsp.idct_permutation[i]; + + s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3); + } + ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16, + s->intra_matrix, s->intra_quant_bias, 8, 8, 1); + s->qscale= 8; + } + + //FIXME var duplication + s->current_picture_ptr->key_frame= + s->current_picture.key_frame= s->pict_type == FF_I_TYPE; //FIXME pic_ptr + s->current_picture_ptr->pict_type= + s->current_picture.pict_type= s->pict_type; + + if(s->current_picture.key_frame) + s->picture_in_gop_number=0; + + s->last_bits= put_bits_count(&s->pb); + switch(s->out_format) { + case FMT_MJPEG: + if (ENABLE_MJPEG_ENCODER) + ff_mjpeg_encode_picture_header(s); + break; + case FMT_H261: + if (ENABLE_H261_ENCODER) + ff_h261_encode_picture_header(s, picture_number); + break; + case FMT_H263: + if (ENABLE_WMV2_ENCODER && s->codec_id == CODEC_ID_WMV2) + ff_wmv2_encode_picture_header(s, picture_number); + else if (ENABLE_MSMPEG4_ENCODER && s->h263_msmpeg4) + msmpeg4_encode_picture_header(s, picture_number); + else if (ENABLE_MPEG4_ENCODER && s->h263_pred) + mpeg4_encode_picture_header(s, picture_number); + else if (ENABLE_RV10_ENCODER && s->codec_id == CODEC_ID_RV10) + rv10_encode_picture_header(s, picture_number); + else if (ENABLE_RV20_ENCODER && s->codec_id == CODEC_ID_RV20) + rv20_encode_picture_header(s, picture_number); + else if (ENABLE_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1) + ff_flv_encode_picture_header(s, picture_number); + else if (ENABLE_ANY_H263_ENCODER) + h263_encode_picture_header(s, picture_number); + break; + case FMT_MPEG1: + if (ENABLE_MPEG1VIDEO_ENCODER || ENABLE_MPEG2VIDEO_ENCODER) + mpeg1_encode_picture_header(s, picture_number); + break; + case FMT_H264: + break; + default: + assert(0); + } + bits= put_bits_count(&s->pb); + s->header_bits= bits - s->last_bits; + + for(i=1; iavctx->thread_count; i++){ + update_duplicate_context_after_me(s->thread_context[i], s); + } + s->avctx->execute(s->avctx, encode_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count); + for(i=1; iavctx->thread_count; i++){ + merge_context_after_encode(s, s->thread_context[i]); + } + emms_c(); + return 0; +} + +void denoise_dct_c(MpegEncContext *s, DCTELEM *block){ + const int intra= s->mb_intra; + int i; + + s->dct_count[intra]++; + + for(i=0; i<64; i++){ + int level= block[i]; + + if(level){ + if(level>0){ + s->dct_error_sum[intra][i] += level; + level -= s->dct_offset[intra][i]; + if(level<0) level=0; + }else{ + s->dct_error_sum[intra][i] -= level; + level += s->dct_offset[intra][i]; + if(level>0) level=0; + } + block[i]= level; + } + } +} + +int dct_quantize_trellis_c(MpegEncContext *s, + DCTELEM *block, int n, + int qscale, int *overflow){ + const int *qmat; + const uint8_t *scantable= s->intra_scantable.scantable; + const uint8_t *perm_scantable= s->intra_scantable.permutated; + int max=0; + unsigned int threshold1, threshold2; + int bias=0; + int run_tab[65]; + int level_tab[65]; + int score_tab[65]; + int survivor[65]; + int survivor_count; + int last_run=0; + int last_level=0; + int last_score= 0; + int last_i; + 
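+    /* Added documentation (not in the original source): this is a Viterbi-style
+     * trellis over scan positions. Roughly, score_tab[i] holds the cheapest
+     * rate+distortion cost of coding all coefficients before position i,
+     * run_tab[i]/level_tab[i] remember the (run, level) decision that achieved it
+     * for back-tracking, and survivor[] keeps the short list of earlier positions
+     * that can still start a cheaper path, which keeps the inner loop small. */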
int coeff[2][64]; + int coeff_count[64]; + int qmul, qadd, start_i, last_non_zero, i, dc; + const int esc_length= s->ac_esc_length; + uint8_t * length; + uint8_t * last_length; + const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6); + + s->dsp.fdct (block); + + if(s->dct_error_sum) + s->denoise_dct(s, block); + qmul= qscale*16; + qadd= ((qscale-1)|1)*8; + + if (s->mb_intra) { + int q; + if (!s->h263_aic) { + if (n < 4) + q = s->y_dc_scale; + else + q = s->c_dc_scale; + q = q << 3; + } else{ + /* For AIC we skip quant/dequant of INTRADC */ + q = 1 << 3; + qadd=0; + } + + /* note: block[0] is assumed to be positive */ + block[0] = (block[0] + (q >> 1)) / q; + start_i = 1; + last_non_zero = 0; + qmat = s->q_intra_matrix[qscale]; + if(s->mpeg_quant || s->out_format == FMT_MPEG1) + bias= 1<<(QMAT_SHIFT-1); + length = s->intra_ac_vlc_length; + last_length= s->intra_ac_vlc_last_length; + } else { + start_i = 0; + last_non_zero = -1; + qmat = s->q_inter_matrix[qscale]; + length = s->inter_ac_vlc_length; + last_length= s->inter_ac_vlc_last_length; + } + last_i= start_i; + + threshold1= (1<=start_i; i--) { + const int j = scantable[i]; + int level = block[j] * qmat[j]; + + if(((unsigned)(level+threshold1))>threshold2){ + last_non_zero = i; + break; + } + } + + for(i=start_i; i<=last_non_zero; i++) { + const int j = scantable[i]; + int level = block[j] * qmat[j]; + +// if( bias+level >= (1<<(QMAT_SHIFT - 3)) +// || bias-level >= (1<<(QMAT_SHIFT - 3))){ + if(((unsigned)(level+threshold1))>threshold2){ + if(level>0){ + level= (bias + level)>>QMAT_SHIFT; + coeff[0][i]= level; + coeff[1][i]= level-1; +// coeff[2][k]= level-2; + }else{ + level= (bias - level)>>QMAT_SHIFT; + coeff[0][i]= -level; + coeff[1][i]= -level+1; +// coeff[2][k]= -level+2; + } + coeff_count[i]= FFMIN(level, 2); + assert(coeff_count[i]); + max |=level; + }else{ + coeff[0][i]= (level>>31)|1; + coeff_count[i]= 1; + } + } + + *overflow= s->max_qcoeff < max; //overflow might have happened + + if(last_non_zero < start_i){ + memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM)); + return last_non_zero; + } + + score_tab[start_i]= 0; + survivor[0]= start_i; + survivor_count= 1; + + for(i=start_i; i<=last_non_zero; i++){ + int level_index, j, zero_distortion; + int dct_coeff= FFABS(block[ scantable[i] ]); + int best_score=256*256*256*120; + + if ( s->dsp.fdct == fdct_ifast +#ifndef FAAN_POSTSCALE + || s->dsp.fdct == ff_faandct +#endif + ) + dct_coeff= (dct_coeff*inv_aanscales[ scantable[i] ]) >> 12; + zero_distortion= dct_coeff*dct_coeff; + + for(level_index=0; level_index < coeff_count[i]; level_index++){ + int distortion; + int level= coeff[level_index][i]; + const int alevel= FFABS(level); + int unquant_coeff; + + assert(level); + + if(s->out_format == FMT_H263){ + unquant_coeff= alevel*qmul + qadd; + }else{ //MPEG1 + j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize + if(s->mb_intra){ + unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3; + unquant_coeff = (unquant_coeff - 1) | 1; + }else{ + unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4; + unquant_coeff = (unquant_coeff - 1) | 1; + } + unquant_coeff<<= 3; + } + + distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion; + level+=64; + if((level&(~127)) == 0){ + for(j=survivor_count-1; j>=0; j--){ + int run= i - survivor[j]; + int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda; + score += score_tab[i-run]; + + if(score < best_score){ + best_score= score; + run_tab[i+1]= run; + 
level_tab[i+1]= level-64; + } + } + + if(s->out_format == FMT_H263){ + for(j=survivor_count-1; j>=0; j--){ + int run= i - survivor[j]; + int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda; + score += score_tab[i-run]; + if(score < last_score){ + last_score= score; + last_run= run; + last_level= level-64; + last_i= i+1; + } + } + } + }else{ + distortion += esc_length*lambda; + for(j=survivor_count-1; j>=0; j--){ + int run= i - survivor[j]; + int score= distortion + score_tab[i-run]; + + if(score < best_score){ + best_score= score; + run_tab[i+1]= run; + level_tab[i+1]= level-64; + } + } + + if(s->out_format == FMT_H263){ + for(j=survivor_count-1; j>=0; j--){ + int run= i - survivor[j]; + int score= distortion + score_tab[i-run]; + if(score < last_score){ + last_score= score; + last_run= run; + last_level= level-64; + last_i= i+1; + } + } + } + } + } + + score_tab[i+1]= best_score; + + //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level + if(last_non_zero <= 27){ + for(; survivor_count; survivor_count--){ + if(score_tab[ survivor[survivor_count-1] ] <= best_score) + break; + } + }else{ + for(; survivor_count; survivor_count--){ + if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda) + break; + } + } + + survivor[ survivor_count++ ]= i+1; + } + + if(s->out_format != FMT_H263){ + last_score= 256*256*256*120; + for(i= survivor[0]; i<=last_non_zero + 1; i++){ + int score= score_tab[i]; + if(i) score += lambda*2; //FIXME exacter? + + if(score < last_score){ + last_score= score; + last_i= i; + last_level= level_tab[i]; + last_run= run_tab[i]; + } + } + } + + s->coded_score[n] = last_score; + + dc= FFABS(block[0]); + last_non_zero= last_i - 1; + memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM)); + + if(last_non_zero < start_i) + return last_non_zero; + + if(last_non_zero == 0 && start_i == 0){ + int best_level= 0; + int best_score= dc * dc; + + for(i=0; iout_format == FMT_H263){ + unquant_coeff= (alevel*qmul + qadd)>>3; + }else{ //MPEG1 + unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4; + unquant_coeff = (unquant_coeff - 1) | 1; + } + unquant_coeff = (unquant_coeff + 4) >> 3; + unquant_coeff<<= 3 + 3; + + distortion= (unquant_coeff - dc) * (unquant_coeff - dc); + level+=64; + if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda; + else score= distortion + esc_length*lambda; + + if(score < best_score){ + best_score= score; + best_level= level - 64; + } + } + block[0]= best_level; + s->coded_score[n] = best_score - dc*dc; + if(best_level == 0) return -1; + else return last_non_zero; + } + + i= last_i; + assert(last_level); + + block[ perm_scantable[last_non_zero] ]= last_level; + i -= last_run + 1; + + for(; i>start_i; i -= run_tab[i] + 1){ + block[ perm_scantable[i-1] ]= level_tab[i]; + } + + return last_non_zero; +} + +//#define REFINE_STATS 1 +static int16_t basis[64][64]; + +static void build_basis(uint8_t *perm){ + int i, j, x, y; + emms_c(); + for(i=0; i<8; i++){ + for(j=0; j<8; j++){ + for(y=0; y<8; y++){ + for(x=0; x<8; x++){ + double s= 0.25*(1<intra_scantable.scantable; + const uint8_t *perm_scantable= s->intra_scantable.permutated; +// unsigned int threshold1, threshold2; +// int bias=0; + int run_tab[65]; + int prev_run=0; + int prev_level=0; + int qmul, qadd, start_i, last_non_zero, i, dc; + uint8_t * length; + uint8_t * last_length; + int lambda; + int rle_index, run, q = 1, sum; //q is only used when s->mb_intra 
is true +#ifdef REFINE_STATS +static int count=0; +static int after_last=0; +static int to_zero=0; +static int from_zero=0; +static int raise=0; +static int lower=0; +static int messed_sign=0; +#endif + + if(basis[0][0] == 0) + build_basis(s->dsp.idct_permutation); + + qmul= qscale*2; + qadd= (qscale-1)|1; + if (s->mb_intra) { + if (!s->h263_aic) { + if (n < 4) + q = s->y_dc_scale; + else + q = s->c_dc_scale; + } else{ + /* For AIC we skip quant/dequant of INTRADC */ + q = 1; + qadd=0; + } + q <<= RECON_SHIFT-3; + /* note: block[0] is assumed to be positive */ + dc= block[0]*q; +// block[0] = (block[0] + (q >> 1)) / q; + start_i = 1; + qmat = s->q_intra_matrix[qscale]; +// if(s->mpeg_quant || s->out_format == FMT_MPEG1) +// bias= 1<<(QMAT_SHIFT-1); + length = s->intra_ac_vlc_length; + last_length= s->intra_ac_vlc_last_length; + } else { + dc= 0; + start_i = 0; + qmat = s->q_inter_matrix[qscale]; + length = s->inter_ac_vlc_length; + last_length= s->inter_ac_vlc_last_length; + } + last_non_zero = s->block_last_index[n]; + +#ifdef REFINE_STATS +{START_TIMER +#endif + dc += (1<<(RECON_SHIFT-1)); + for(i=0; i<64; i++){ + rem[i]= dc - (orig[i]<0); + assert(w<(1<<6)); + sum += w*w; + } + lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6); +#ifdef REFINE_STATS +{START_TIMER +#endif + run=0; + rle_index=0; + for(i=start_i; i<=last_non_zero; i++){ + int j= perm_scantable[i]; + const int level= block[j]; + int coeff; + + if(level){ + if(level<0) coeff= qmul*level - qadd; + else coeff= qmul*level + qadd; + run_tab[rle_index++]=run; + run=0; + + s->dsp.add_8x8basis(rem, basis[j], coeff); + }else{ + run++; + } + } +#ifdef REFINE_STATS +if(last_non_zero>0){ +STOP_TIMER("init rem[]") +} +} + +{START_TIMER +#endif + for(;;){ + int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0); + int best_coeff=0; + int best_change=0; + int run2, best_unquant_change=0, analyze_gradient; +#ifdef REFINE_STATS +{START_TIMER +#endif + analyze_gradient = last_non_zero > 2 || s->avctx->quantizer_noise_shaping >= 3; + + if(analyze_gradient){ +#ifdef REFINE_STATS +{START_TIMER +#endif + for(i=0; i<64; i++){ + int w= weight[i]; + + d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12); + } +#ifdef REFINE_STATS +STOP_TIMER("rem*w*w")} +{START_TIMER +#endif + s->dsp.fdct(d1); +#ifdef REFINE_STATS +STOP_TIMER("dct")} +#endif + } + + if(start_i){ + const int level= block[0]; + int change, old_coeff; + + assert(s->mb_intra); + + old_coeff= q*level; + + for(change=-1; change<=1; change+=2){ + int new_level= level + change; + int score, new_coeff; + + new_coeff= q*new_level; + if(new_coeff >= 2048 || new_coeff < 0) + continue; + + score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff); + if(scoreavctx->quantizer_noise_shaping < 3 && i > last_non_zero + 1) + break; + + if(level){ + if(level<0) old_coeff= qmul*level - qadd; + else old_coeff= qmul*level + qadd; + run2= run_tab[rle_index++]; //FIXME ! 
maybe after last + }else{ + old_coeff=0; + run2--; + assert(run2>=0 || i >= last_non_zero ); + } + + for(change=-1; change<=1; change+=2){ + int new_level= level + change; + int score, new_coeff, unquant_change; + + score=0; + if(s->avctx->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level)) + continue; + + if(new_level){ + if(new_level<0) new_coeff= qmul*new_level - qadd; + else new_coeff= qmul*new_level + qadd; + if(new_coeff >= 2048 || new_coeff <= -2048) + continue; + //FIXME check for overflow + + if(level){ + if(level < 63 && level > -63){ + if(i < last_non_zero) + score += length[UNI_AC_ENC_INDEX(run, new_level+64)] + - length[UNI_AC_ENC_INDEX(run, level+64)]; + else + score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)] + - last_length[UNI_AC_ENC_INDEX(run, level+64)]; + } + }else{ + assert(FFABS(new_level)==1); + + if(analyze_gradient){ + int g= d1[ scantable[i] ]; + if(g && (g^new_level) >= 0) + continue; + } + + if(i < last_non_zero){ + int next_i= i + run2 + 1; + int next_level= block[ perm_scantable[next_i] ] + 64; + + if(next_level&(~127)) + next_level= 0; + + if(next_i < last_non_zero) + score += length[UNI_AC_ENC_INDEX(run, 65)] + + length[UNI_AC_ENC_INDEX(run2, next_level)] + - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]; + else + score += length[UNI_AC_ENC_INDEX(run, 65)] + + last_length[UNI_AC_ENC_INDEX(run2, next_level)] + - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]; + }else{ + score += last_length[UNI_AC_ENC_INDEX(run, 65)]; + if(prev_level){ + score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)] + - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]; + } + } + } + }else{ + new_coeff=0; + assert(FFABS(level)==1); + + if(i < last_non_zero){ + int next_i= i + run2 + 1; + int next_level= block[ perm_scantable[next_i] ] + 64; + + if(next_level&(~127)) + next_level= 0; + + if(next_i < last_non_zero) + score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)] + - length[UNI_AC_ENC_INDEX(run2, next_level)] + - length[UNI_AC_ENC_INDEX(run, 65)]; + else + score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)] + - last_length[UNI_AC_ENC_INDEX(run2, next_level)] + - length[UNI_AC_ENC_INDEX(run, 65)]; + }else{ + score += -last_length[UNI_AC_ENC_INDEX(run, 65)]; + if(prev_level){ + score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)] + - length[UNI_AC_ENC_INDEX(prev_run, prev_level)]; + } + } + } + + score *= lambda; + + unquant_change= new_coeff - old_coeff; + assert((score < 100*lambda && score > -100*lambda) || lambda==0); + + score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change); + if(score last_non_zero){ + last_non_zero= best_coeff; + assert(block[j]); +#ifdef REFINE_STATS +after_last++; +#endif + }else{ +#ifdef REFINE_STATS +if(block[j]){ + if(block[j] - best_change){ + if(FFABS(block[j]) > FFABS(block[j] - best_change)){ + raise++; + }else{ + lower++; + } + }else{ + from_zero++; + } +}else{ + to_zero++; +} +#endif + for(; last_non_zero>=start_i; last_non_zero--){ + if(block[perm_scantable[last_non_zero]]) + break; + } + } +#ifdef REFINE_STATS +count++; +if(256*256*256*64 % count == 0){ + printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number); +} +#endif + run=0; + rle_index=0; + for(i=start_i; i<=last_non_zero; i++){ + int j= perm_scantable[i]; + const int level= block[j]; + + if(level){ + run_tab[rle_index++]=run; + run=0; + }else{ + run++; + } + } + + 
s->dsp.add_8x8basis(rem, basis[j], best_unquant_change); + }else{ + break; + } + } +#ifdef REFINE_STATS +if(last_non_zero>0){ +STOP_TIMER("iterative search") +} +} +#endif + + return last_non_zero; +} + +int dct_quantize_c(MpegEncContext *s, + DCTELEM *block, int n, + int qscale, int *overflow) +{ + int i, j, level, last_non_zero, q, start_i; + const int *qmat; + const uint8_t *scantable= s->intra_scantable.scantable; + int bias; + int max=0; + unsigned int threshold1, threshold2; + + s->dsp.fdct (block); + + if(s->dct_error_sum) + s->denoise_dct(s, block); + + if (s->mb_intra) { + if (!s->h263_aic) { + if (n < 4) + q = s->y_dc_scale; + else + q = s->c_dc_scale; + q = q << 3; + } else + /* For AIC we skip quant/dequant of INTRADC */ + q = 1 << 3; + + /* note: block[0] is assumed to be positive */ + block[0] = (block[0] + (q >> 1)) / q; + start_i = 1; + last_non_zero = 0; + qmat = s->q_intra_matrix[qscale]; + bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT); + } else { + start_i = 0; + last_non_zero = -1; + qmat = s->q_inter_matrix[qscale]; + bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT); + } + threshold1= (1<=start_i;i--) { + j = scantable[i]; + level = block[j] * qmat[j]; + + if(((unsigned)(level+threshold1))>threshold2){ + last_non_zero = i; + break; + }else{ + block[j]=0; + } + } + for(i=start_i; i<=last_non_zero; i++) { + j = scantable[i]; + level = block[j] * qmat[j]; + +// if( bias+level >= (1<= (1<threshold2){ + if(level>0){ + level= (bias + level)>>QMAT_SHIFT; + block[j]= level; + }else{ + level= (bias - level)>>QMAT_SHIFT; + block[j]= -level; + } + max |=level; + }else{ + block[j]=0; + } + } + *overflow= s->max_qcoeff < max; //overflow might have happened + + /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */ + if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM) + ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero); + + return last_non_zero; +} + +AVCodec h263_encoder = { + "h263", + CODEC_TYPE_VIDEO, + CODEC_ID_H263, + sizeof(MpegEncContext), + MPV_encode_init, + MPV_encode_picture, + MPV_encode_end, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("H.263"), +}; + +AVCodec h263p_encoder = { + "h263p", + CODEC_TYPE_VIDEO, + CODEC_ID_H263P, + sizeof(MpegEncContext), + MPV_encode_init, + MPV_encode_picture, + MPV_encode_end, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263 version 2"), +}; + +AVCodec flv_encoder = { + "flv", + CODEC_TYPE_VIDEO, + CODEC_ID_FLV1, + sizeof(MpegEncContext), + MPV_encode_init, + MPV_encode_picture, + MPV_encode_end, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("Flash Video"), +}; + +AVCodec rv10_encoder = { + "rv10", + CODEC_TYPE_VIDEO, + CODEC_ID_RV10, + sizeof(MpegEncContext), + MPV_encode_init, + MPV_encode_picture, + MPV_encode_end, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"), +}; + +AVCodec rv20_encoder = { + "rv20", + CODEC_TYPE_VIDEO, + CODEC_ID_RV20, + sizeof(MpegEncContext), + MPV_encode_init, + MPV_encode_picture, + MPV_encode_end, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"), +}; + +AVCodec mpeg4_encoder = { + "mpeg4", + CODEC_TYPE_VIDEO, + CODEC_ID_MPEG4, + sizeof(MpegEncContext), + MPV_encode_init, + MPV_encode_picture, + MPV_encode_end, 
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .capabilities= CODEC_CAP_DELAY, + .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"), +}; + +AVCodec msmpeg4v1_encoder = { + "msmpeg4v1", + CODEC_TYPE_VIDEO, + CODEC_ID_MSMPEG4V1, + sizeof(MpegEncContext), + MPV_encode_init, + MPV_encode_picture, + MPV_encode_end, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"), +}; + +AVCodec msmpeg4v2_encoder = { + "msmpeg4v2", + CODEC_TYPE_VIDEO, + CODEC_ID_MSMPEG4V2, + sizeof(MpegEncContext), + MPV_encode_init, + MPV_encode_picture, + MPV_encode_end, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"), +}; + +AVCodec msmpeg4v3_encoder = { + "msmpeg4", + CODEC_TYPE_VIDEO, + CODEC_ID_MSMPEG4V3, + sizeof(MpegEncContext), + MPV_encode_init, + MPV_encode_picture, + MPV_encode_end, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"), +}; + +AVCodec wmv1_encoder = { + "wmv1", + CODEC_TYPE_VIDEO, + CODEC_ID_WMV1, + sizeof(MpegEncContext), + MPV_encode_init, + MPV_encode_picture, + MPV_encode_end, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"), +}; diff --git a/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo_parser.c b/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo_parser.c new file mode 100644 index 0000000000..30f149d547 --- /dev/null +++ b/src/add-ons/media/plugins/avcodec/libavcodec/mpegvideo_parser.c @@ -0,0 +1,182 @@ +/* + * MPEG1 / MPEG2 video parser + * Copyright (c) 2000,2001 Fabrice Bellard. + * Copyright (c) 2002-2004 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "parser.h"
+#include "mpegvideo.h"
+
+static void mpegvideo_extract_headers(AVCodecParserContext *s,
+                                      AVCodecContext *avctx,
+                                      const uint8_t *buf, int buf_size)
+{
+    ParseContext1 *pc = s->priv_data;
+    const uint8_t *buf_end;
+    const uint8_t *buf_start= buf;
+    uint32_t start_code;
+    int frame_rate_index, ext_type, bytes_left;
+    int frame_rate_ext_n, frame_rate_ext_d;
+    int picture_structure, top_field_first, repeat_first_field, progressive_frame;
+    int horiz_size_ext, vert_size_ext, bit_rate_ext;
+//FIXME replace the crap with get_bits()
+    s->repeat_pict = 0;
+    buf_end = buf + buf_size;
+    while (buf < buf_end) {
+        start_code= -1;
+        buf= ff_find_start_code(buf, buf_end, &start_code);
+        bytes_left = buf_end - buf;
+        switch(start_code) {
+        case PICTURE_START_CODE:
+            ff_fetch_timestamp(s, buf-buf_start-4, 1);
+
+            if (bytes_left >= 2) {
+                s->pict_type = (buf[1] >> 3) & 7;
+            }
+            break;
+        case SEQ_START_CODE:
+            if (bytes_left >= 7) {
+                pc->width  = (buf[0] << 4) | (buf[1] >> 4);
+                pc->height = ((buf[1] & 0x0f) << 8) | buf[2];
+                avcodec_set_dimensions(avctx, pc->width, pc->height);
+                frame_rate_index = buf[3] & 0xf;
+                pc->frame_rate.den = avctx->time_base.den = ff_frame_rate_tab[frame_rate_index].num;
+                pc->frame_rate.num = avctx->time_base.num = ff_frame_rate_tab[frame_rate_index].den;
+                avctx->bit_rate = ((buf[4]<<10) | (buf[5]<<2) | (buf[6]>>6))*400;
+                avctx->codec_id = CODEC_ID_MPEG1VIDEO;
+                avctx->sub_id = 1;
+            }
+            break;
+        case EXT_START_CODE:
+            if (bytes_left >= 1) {
+                ext_type = (buf[0] >> 4);
+                switch(ext_type) {
+                case 0x1: /* sequence extension */
+                    if (bytes_left >= 6) {
+                        horiz_size_ext = ((buf[1] & 1) << 1) | (buf[2] >> 7);
+                        vert_size_ext = (buf[2] >> 5) & 3;
+                        bit_rate_ext = ((buf[2] & 0x1F)<<7) | (buf[3]>>1);
+                        frame_rate_ext_n = (buf[5] >> 5) & 3;
+                        frame_rate_ext_d = (buf[5] & 0x1f);
+                        pc->progressive_sequence = buf[1] & (1 << 3);
+                        avctx->has_b_frames= !(buf[5] >> 7);
+
+                        pc->width  |=(horiz_size_ext << 12);
+                        pc->height |=( vert_size_ext << 12);
+                        avctx->bit_rate += (bit_rate_ext << 18) * 400;
+                        avcodec_set_dimensions(avctx, pc->width, pc->height);
+                        avctx->time_base.den = pc->frame_rate.den * (frame_rate_ext_n + 1);
+                        avctx->time_base.num = pc->frame_rate.num * (frame_rate_ext_d + 1);
+                        avctx->codec_id = CODEC_ID_MPEG2VIDEO;
+                        avctx->sub_id = 2; /* forces MPEG2 */
+                    }
+                    break;
+                case 0x8: /* picture coding extension */
+                    if (bytes_left >= 5) {
+                        picture_structure = buf[2]&3;
+                        top_field_first = buf[3] & (1 << 7);
+                        repeat_first_field = buf[3] & (1 << 1);
+                        progressive_frame = buf[4] & (1 << 7);
+
+                        /* check if we must repeat the frame */
+                        if (repeat_first_field) {
+                            if (pc->progressive_sequence) {
+                                if (top_field_first)
+                                    s->repeat_pict = 4;
+                                else
+                                    s->repeat_pict = 2;
+                            } else if (progressive_frame) {
+                                s->repeat_pict = 1;
+                            }
+                        }
+                    }
+                    break;
+                }
+            }
+            break;
+        case -1:
+            goto the_end;
+        default:
+            /* we stop parsing when we encounter a slice. It ensures
+                that this function takes a negligible amount of time */
+            if (start_code >= SLICE_MIN_START_CODE &&
+                start_code <= SLICE_MAX_START_CODE)
+                goto the_end;
+            break;
+        }
+    }
+ the_end: ;
+}
+
+static int mpegvideo_parse(AVCodecParserContext *s,
+                           AVCodecContext *avctx,
+                           const uint8_t **poutbuf, int *poutbuf_size,
+                           const uint8_t *buf, int buf_size)
+{
+    ParseContext1 *pc1 = s->priv_data;
+    ParseContext *pc= &pc1->pc;
+    int next;
+
+    if(s->flags & PARSER_FLAG_COMPLETE_FRAMES){
+        next= buf_size;
+    }else{
+        next= ff_mpeg1_find_frame_end(pc, buf, buf_size);
+
+        if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
+            *poutbuf = NULL;
+            *poutbuf_size = 0;
+            return buf_size;
+        }
+
+    }
+    /* we have a full frame : we just parse the first few MPEG headers
+       to have the full timing information. The time take by this
+       function should be negligible for uncorrupted streams */
+    mpegvideo_extract_headers(s, avctx, buf, buf_size);
+#if 0
+    printf("pict_type=%d frame_rate=%0.3f repeat_pict=%d\n",
+           s->pict_type, (double)avctx->time_base.den / avctx->time_base.num, s->repeat_pict);
+#endif
+
+    *poutbuf = buf;
+    *poutbuf_size = buf_size;
+    return next;
+}
+
+static int mpegvideo_split(AVCodecContext *avctx,
+                           const uint8_t *buf, int buf_size)
+{
+    int i;
+    uint32_t state= -1;
+
+    for(i=0; i<buf_size; i++){
+        state= (state<<8) | buf[i];
+        if(state != 0x1B3 && state != 0x1B5 && state < 0x200 && state >= 0x100)
+            return i-3;
+    }
+    return 0;
+}
+
+AVCodecParser mpegvideo_parser = {
+    { CODEC_ID_MPEG1VIDEO, CODEC_ID_MPEG2VIDEO },
+    sizeof(ParseContext1),
+    NULL,
+    mpegvideo_parse,
+    ff_parse1_close,
+};
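For reference, a minimal standalone sketch (not part of the patch) of the rolling start-code scan that ff_find_start_code() and mpegvideo_split() above depend on: MPEG-1/2 start codes are the byte-aligned patterns 0x000001XX, so keeping the last four bytes in a 32-bit accumulator lets each byte position be tested in constant time. The helper name and the demo buffer below are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Scan a buffer and report every MPEG-1/2 start code (0x000001XX),
 * using the same shift-register idiom as mpegvideo_split() above. */
static void list_start_codes(const uint8_t *buf, int buf_size)
{
    uint32_t state = 0xFFFFFFFF;   /* primed like the "state= -1" above */
    int i;

    for (i = 0; i < buf_size; i++) {
        state = (state << 8) | buf[i];
        if ((state & 0xFFFFFF00) == 0x00000100)
            printf("start code 0x%08x at offset %d\n", (unsigned)state, i - 3);
    }
}

int main(void)
{
    /* made-up data: sequence header, sequence extension, picture, slice */
    static const uint8_t demo[] = {
        0x00, 0x00, 0x01, 0xB3,
        0x00, 0x00, 0x01, 0xB5,
        0x00, 0x00, 0x01, 0x00,
        0x00, 0x00, 0x01, 0x01,
    };
    list_start_codes(demo, (int)sizeof(demo));
    return 0;
}

On the demo data this prints the sequence header (0x1B3), sequence extension (0x1B5), picture (0x100) and first slice (0x101) codes at offsets 0, 4, 8 and 12. mpegvideo_split() applies the same scan but returns at the first code that is neither 0x1B3 nor 0x1B5, so the leading headers can be split off as codec extradata.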